From 58e462781928dd9bba2c3d6c1bf6be016d2b901d Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Thu, 19 Sep 2024 14:56:27 +0800
Subject: [PATCH 01/28] support session scope and add comment

---
 pkg/session/session.go               |  2 +-
 pkg/sessionctx/variable/session.go   |  3 +++
 pkg/sessionctx/variable/sysvar.go    | 10 +++++++++-
 pkg/sessionctx/variable/tidb_vars.go |  4 ++--
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/pkg/session/session.go b/pkg/session/session.go
index 5c5ccf36c749a..1b45bdfbda2b4 100644
--- a/pkg/session/session.go
+++ b/pkg/session/session.go
@@ -3212,7 +3212,7 @@ func splitAndScatterTable(store kv.Storage, tableIDs []int64) {
 		for _, id := range tableIDs {
 			regionIDs = append(regionIDs, ddl.SplitRecordRegion(ctxWithTimeout, s, id, id, variable.DefTiDBScatterRegion))
 		}
-		if variable.DefTiDBScatterRegion {
+		if variable.DefTiDBScatterRegion != "" {
 			ddl.WaitScatterRegionFinish(ctxWithTimeout, s, regionIDs...)
 		}
 		cancel()
diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go
index c214de45631be..3072f3f0e1dc2 100644
--- a/pkg/sessionctx/variable/session.go
+++ b/pkg/sessionctx/variable/session.go
@@ -1715,6 +1715,9 @@ type SessionVars struct {
 	// SharedLockPromotion indicates whether the `select for lock` statements would be executed as the
 	// `select for update` statements which do acquire pessimistic locks.
 	SharedLockPromotion bool
+
+	// ScatterRegion will scatter the regions for DDLs when it is "table" or "global"; an empty string means scatter is not triggered.
+	ScatterRegion string
 }
 
 // GetSessionVars implements the `SessionVarsProvider` interface.
diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go
index cb14aa58984d0..cbcd885b3a2e1 100644
--- a/pkg/sessionctx/variable/sysvar.go
+++ b/pkg/sessionctx/variable/sysvar.go
@@ -775,7 +775,15 @@ var defaultSysVars = []*SysVar{
 		SetMaxDeltaSchemaCount(TidbOptInt64(val, DefTiDBMaxDeltaSchemaCount))
 		return nil
 	}},
-	{Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToOnOff(DefTiDBScatterRegion), Type: TypeBool},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{"", "table", "global"}, Type: TypeEnum,
+		SetSession: func(vars *SessionVars, val string) error {
+			vars.ScatterRegion = val
+			return nil
+		},
+		GetSession: func(vars *SessionVars) (string, error) {
+			return vars.ScatterRegion, nil
+		},
+	},
 	{Scope: ScopeGlobal, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(DefTiDBEnableStmtSummary), Type: TypeBool, AllowEmpty: true,
 		SetGlobal: func(_ context.Context, s *SessionVars, val string) error {
 			return stmtsummaryv2.SetEnabled(TiDBOptOn(val))
diff --git a/pkg/sessionctx/variable/tidb_vars.go b/pkg/sessionctx/variable/tidb_vars.go
index ffb89f4251034..ff4587d0a2e4f 100644
--- a/pkg/sessionctx/variable/tidb_vars.go
+++ b/pkg/sessionctx/variable/tidb_vars.go
@@ -534,7 +534,7 @@ const (
 	// deltaSchemaInfos is a queue that maintains the history of schema changes.
 	TiDBMaxDeltaSchemaCount = "tidb_max_delta_schema_count"
 
-	// TiDBScatterRegion will scatter the regions for DDLs when it is ON.
+	// TiDBScatterRegion will scatter the regions for DDLs when it is "table" or "global"; an empty string means scatter is not triggered.
	TiDBScatterRegion = "tidb_scatter_region"
 
 	// TiDBWaitSplitRegionFinish defines the split region behaviour is sync or async.
@@ -1334,7 +1334,7 @@ const (
 	DefTiDBSkipIsolationLevelCheck        = false
 	DefTiDBExpensiveQueryTimeThreshold    = 60      // 60s
 	DefTiDBExpensiveTxnTimeThreshold      = 60 * 10 // 10 minutes
-	DefTiDBScatterRegion                  = false
+	DefTiDBScatterRegion                  = ""
 	DefTiDBWaitSplitRegionFinish          = true
 	DefWaitSplitRegionTimeout             = 300 // 300s
 	DefTiDBEnableNoopFuncs                = Off

From 9312f45582599c83d296e30c1bc37f5757424698 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Thu, 19 Sep 2024 15:03:45 +0800
Subject: [PATCH 02/28] support scatter region in `global` level

---
 pkg/ddl/executor.go     | 18 +++++------
 pkg/ddl/partition.go    |  2 +-
 pkg/ddl/split_region.go | 66 +++++++++++++++++++++++++++++++----------
 3 files changed, 60 insertions(+), 26 deletions(-)

diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go
index 1ba9c63bc5975..cb3c80b183c73 100644
--- a/pkg/ddl/executor.go
+++ b/pkg/ddl/executor.go
@@ -1378,21 +1378,21 @@ func preSplitAndScatter(ctx sessionctx.Context, store kv.Storage, tbInfo *model.
 		return
 	}
 	var (
-		preSplit      func()
-		scatterRegion bool
+		preSplit     func()
+		scatterScope string
 	)
-	val, err := ctx.GetSessionVars().GetGlobalSystemVar(context.Background(), variable.TiDBScatterRegion)
-	if err != nil {
-		logutil.DDLLogger().Warn("won't scatter region", zap.Error(err))
+	val, ok := ctx.GetSessionVars().GetSystemVar(variable.TiDBScatterRegion)
+	if !ok {
+		logutil.DDLLogger().Warn("won't scatter region")
 	} else {
-		scatterRegion = variable.TiDBOptOn(val)
+		scatterScope = val
 	}
 	if len(parts) > 0 {
-		preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterRegion) }
+		preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterScope) }
 	} else {
-		preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterRegion) }
+		preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterScope) }
 	}
-	if scatterRegion {
+	if scatterScope != "" {
 		preSplit()
 	} else {
 		go preSplit()
diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go
index 8c619d91c2e40..029c66e2d3591 100644
--- a/pkg/ddl/partition.go
+++ b/pkg/ddl/partition.go
@@ -3224,7 +3224,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo
 	// and we will soon start writing to the new partitions.
 	if s, ok := jobCtx.store.(kv.SplittableStore); ok && s != nil {
 		// partInfo only contains the AddingPartitions
-		splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, true)
+		splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, "table")
 	}
 
 	// Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that.
diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go
index a921ae1245348..988e566576418 100644
--- a/pkg/ddl/split_region.go
+++ b/pkg/ddl/split_region.go
@@ -30,7 +30,7 @@ import (
 	"go.uber.org/zap"
 )
 
-func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatter bool) {
+func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope string) {
 	// Max partition count is 8192, should we sample and just choose some partitions to split?
 	regionIDs := make([]uint64, 0, len(parts))
 	ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout())
@@ -38,34 +38,34 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore,
 	ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL)
 	if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 {
 		for _, def := range parts {
-			regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatter)...)
+			regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatterScope)...)
 		}
 	} else {
 		for _, def := range parts {
-			regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatter))
+			regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatterScope))
 		}
 	}
-	if scatter {
+	if scatterScope != "" {
 		WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...)
 	}
 }
 
-func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) {
+func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatterScope string) {
 	ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout())
 	defer cancel()
 	ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL)
 	var regionIDs []uint64
 	if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 {
-		regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatter)
+		regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatterScope)
 	} else {
-		regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatter))
+		regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatterScope))
 	}
-	if scatter {
+	if scatterScope != "" {
 		WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...)
 	}
 }
 
-func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatter bool) []uint64 {
+func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope string) []uint64 {
 	// Example:
 	// sharding_bits = 4
 	// PreSplitRegions = 2
@@ -107,20 +107,54 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS
 		key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID))
 		splitTableKeys = append(splitTableKeys, key)
 	}
-	var err error
-	regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, &tbInfo.ID)
+	var (
+		err     error
+		scatter bool
+		// PD controls the scope of scatter region through `tableId`.
+		// `nil` indicates that PD will scatter region at cluster level.
+		tableID *int64
+	)
+	switch scatterScope {
+	case "table":
+		scatter = true
+		tableID = &tbInfo.ID
+	case "global":
+		scatter = true
+		tableID = nil
+	default:
+		scatter = false
+		tableID = &tbInfo.ID
+	}
+	regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, tableID)
 	if err != nil {
 		logutil.DDLLogger().Warn("pre split some table regions failed",
 			zap.Stringer("table", tbInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err))
 	}
-	regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter)...)
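+	// splitIndexRegion reuses the scatter flag and tableID computed above, so a
+	// "global" scope (nil tableID) scatters the index regions cluster-wide as well.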
+	regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter, tableID)...)
 	return regionIDs
 }
 
 // SplitRecordRegion is to split region in store by table prefix.
-func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatter bool) uint64 {
+func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64 {
 	tableStartKey := tablecodec.GenTablePrefix(physicalTableID)
-	regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, &tableID)
+	var (
+		scatter bool
+		// PD controls the scope of scatter region through `tID`.
+		// `nil` indicates that PD will scatter region at cluster level.
+		tID *int64
+	)
+	switch scatterScope {
+	case "table":
+		scatter = true
+		tID = &tableID
+	case "global":
+		scatter = true
+		tID = nil
+	default:
+		scatter = false
+		tID = &tableID
+	}
+	regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, tID)
 	if err != nil {
 		// It will be automatically split by TiKV later.
 		logutil.DDLLogger().Warn("split table region failed", zap.Error(err))
@@ -131,13 +165,13 @@ func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTa
 	return 0
 }
 
-func splitIndexRegion(store kv.SplittableStore, tblInfo *model.TableInfo, scatter bool) []uint64 {
+func splitIndexRegion(store kv.SplittableStore, tblInfo *model.TableInfo, scatter bool, tableID *int64) []uint64 {
 	splitKeys := make([][]byte, 0, len(tblInfo.Indices))
 	for _, idx := range tblInfo.Indices {
 		indexPrefix := tablecodec.EncodeTableIndexPrefix(tblInfo.ID, idx.ID)
 		splitKeys = append(splitKeys, indexPrefix)
 	}
-	regionIDs, err := store.SplitRegions(context.Background(), splitKeys, scatter, &tblInfo.ID)
+	regionIDs, err := store.SplitRegions(context.Background(), splitKeys, scatter, tableID)
 	if err != nil {
 		logutil.DDLLogger().Warn("pre split some table index regions failed",
 			zap.Stringer("table", tblInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err))

From 601e21aca143605a3cf9ddfba95dc6c5143367ab Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Thu, 19 Sep 2024 15:05:40 +0800
Subject: [PATCH 03/28] add test

---
 pkg/ddl/table_split_test.go | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index c2110d3b6905b..508049693be17 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -67,6 +67,39 @@ func TestTableSplit(t *testing.T) {
 	}
 }
 
+func TestScatterRegion(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk2 := testkit.NewTestKit(t, store)
+
+	tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows(""))
+	tk.MustExec("set @@tidb_scatter_region = 'table';")
+	tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("table"))
+	tk.MustExec("set @@tidb_scatter_region = 'global';")
+	tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("global"))
+	tk.MustExec("set @@tidb_scatter_region = '';")
+	tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows(""))
+	tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows(""))
+
+	tk.MustExec("set global tidb_scatter_region = 'table';")
+	tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("table"))
+	tk2 = testkit.NewTestKit(t, store)
+	tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("table"))
+
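+	// Only sessions created after a global change observe the new value;
+	// existing sessions keep their own session-scope copy.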
tk.MustExec("set global tidb_scatter_region = 'global';") + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("global")) + tk2 = testkit.NewTestKit(t, store) + tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("global")) + + tk.MustExec("set global tidb_scatter_region = '';") + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("")) + tk2 = testkit.NewTestKit(t, store) + tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + + err := tk.ExecToErr("set @@tidb_scatter_region = 'test';") + require.ErrorContains(t, err, "Variable 'tidb_scatter_region' can't be set to the value of 'test'") +} + type kvStore interface { GetRegionCache() *tikv.RegionCache } From 082d6a89754e2373ac1bdd195bd6ba3e7a6948d0 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Thu, 19 Sep 2024 15:35:51 +0800 Subject: [PATCH 04/28] fix test --- pkg/ddl/table_split_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go index 508049693be17..6fe1878e77f81 100644 --- a/pkg/ddl/table_split_test.go +++ b/pkg/ddl/table_split_test.go @@ -79,22 +79,18 @@ func TestScatterRegion(t *testing.T) { tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("global")) tk.MustExec("set @@tidb_scatter_region = '';") tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) - tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) tk.MustExec("set global tidb_scatter_region = 'table';") tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("table")) + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) tk2 = testkit.NewTestKit(t, store) tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("table")) tk.MustExec("set global tidb_scatter_region = 'global';") tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("global")) - tk2 = testkit.NewTestKit(t, store) - tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("global")) - tk.MustExec("set global tidb_scatter_region = '';") tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("")) - tk2 = testkit.NewTestKit(t, store) - tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) err := tk.ExecToErr("set @@tidb_scatter_region = 'test';") require.ErrorContains(t, err, "Variable 'tidb_scatter_region' can't be set to the value of 'test'") From 38fe283ea3885b86f95aa884a17c553366635e7e Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:47:29 +0800 Subject: [PATCH 05/28] compatible upgrade --- pkg/session/bootstrap.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index ec74727a89189..584c2ddee3667 100644 --- a/pkg/session/bootstrap.go +++ b/pkg/session/bootstrap.go @@ -1133,11 +1133,15 @@ const ( // version 213 // create `mysql.tidb_pitr_id_map` table version213 = 213 + + // version 214 + // changes variable `tidb_scatter_region` value from ON to "table" and OFF to "". + version214 = 214 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. 
// please make sure this is the largest version -var currentBootstrapVersion int64 = version213 +var currentBootstrapVersion int64 = version214 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it. var internalSQLTimeout = owner.ManagerSessionTTL + 15 @@ -1306,6 +1310,7 @@ var ( upgradeToVer211, upgradeToVer212, upgradeToVer213, + upgradeToVer214, } ) @@ -3141,6 +3146,15 @@ func upgradeToVer213(s sessiontypes.Session, ver int64) { mustExecute(s, CreatePITRIDMap) } +func upgradeToVer214(s sessiontypes.Session, ver int64) { + if ver >= version213 { + return + } + + mustExecute(s, "UPDATE mysql.global_variables SET VARIABLE_VALUE='' WHERE VARIABLE_NAME = 'tidb_scatter_region' AND VARIABLE_VALUE = 'OFF'") + mustExecute(s, "UPDATE mysql.global_variables SET VARIABLE_VALUE='table' WHERE VARIABLE_NAME = 'tidb_scatter_region' AND VARIABLE_VALUE = 'ON'") +} + // initGlobalVariableIfNotExists initialize a global variable with specific val if it does not exist. func initGlobalVariableIfNotExists(s sessiontypes.Session, name string, val any) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) From 3731bb64b19d6b43aee81bd51da43ea14ea9c4bc Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:22:57 +0800 Subject: [PATCH 06/28] refactor scatterScope to const --- pkg/ddl/executor.go | 12 ++++++------ pkg/ddl/partition.go | 2 +- pkg/ddl/split_region.go | 34 +++++++++++++++++++++------------- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 3beb645a9472d..2bb3e0c5336cc 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -1382,21 +1382,21 @@ func preSplitAndScatter(ctx sessionctx.Context, store kv.Storage, tbInfo *model. return } var ( - preSplit func() - scatterScope string + preSplit func() + scope scatterScope ) val, ok := ctx.GetSessionVars().GetSystemVar(variable.TiDBScatterRegion) if !ok { logutil.DDLLogger().Warn("won't scatter region") } else { - scatterScope = val + scope = scatterScope(val) } if len(parts) > 0 { - preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterScope) } + preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scope) } } else { - preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterScope) } + preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scope) } } - if scatterScope != "" { + if scope != scatterOff { preSplit() } else { go preSplit() diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index 92e83f5b171b9..c76dab71cfe04 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -3230,7 +3230,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo // and we will soon start writing to the new partitions. if s, ok := jobCtx.store.(kv.SplittableStore); ok && s != nil { // partInfo only contains the AddingPartitions - splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, "table") + splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, scatterTable) } // Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that. 
diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index 988e566576418..1bb869bc59c35 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -30,7 +30,15 @@ import ( "go.uber.org/zap" ) -func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope string) { +type scatterScope string + +const ( + scatterOff scatterScope = "" + scatterTable scatterScope = "table" + scatterGlobal scatterScope = "global" +) + +func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope scatterScope) { // Max partition count is 8192, should we sample and just choose some partitions to split? regionIDs := make([]uint64, 0, len(parts)) ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) @@ -45,27 +53,27 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatterScope)) } } - if scatterScope != "" { + if scatterScope != scatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatterScope string) { +func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scope scatterScope) { ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) var regionIDs []uint64 if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { - regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatterScope) + regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scope) } else { - regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatterScope)) + regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scope)) } - if scatterScope != "" { + if scope != scatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope string) []uint64 { +func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope scatterScope) []uint64 { // Example: // sharding_bits = 4 // PreSplitRegions = 2 @@ -115,10 +123,10 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS tableID *int64 ) switch scatterScope { - case "table": + case scatterTable: scatter = true tableID = &tbInfo.ID - case "global": + case scatterGlobal: scatter = true tableID = nil default: @@ -135,7 +143,7 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS } // SplitRecordRegion is to split region in store by table prefix. 
-func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64 { +func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scope scatterScope) uint64 { tableStartKey := tablecodec.GenTablePrefix(physicalTableID) var ( scatter bool @@ -143,11 +151,11 @@ func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTa // `nil` indicates that PD will scatter region at cluster level. tID *int64 ) - switch scatterScope { - case "table": + switch scope { + case scatterTable: scatter = true tID = &tableID - case "global": + case scatterGlobal: scatter = true tID = nil default: From 82569743f5455efc04141a98386b7824ea296157 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:32:37 +0800 Subject: [PATCH 07/28] fix test --- pkg/ddl/tests/partition/db_partition_test.go | 4 ++-- pkg/ddl/tests/serial/serial_test.go | 4 ++-- pkg/executor/executor_failpoint_test.go | 2 +- pkg/executor/sample_test.go | 2 +- pkg/executor/test/showtest/show_test.go | 2 +- pkg/executor/test/splittest/split_table_test.go | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/ddl/tests/partition/db_partition_test.go b/pkg/ddl/tests/partition/db_partition_test.go index a9e554a1ec007..b46a92f3fea31 100644 --- a/pkg/ddl/tests/partition/db_partition_test.go +++ b/pkg/ddl/tests/partition/db_partition_test.go @@ -1216,7 +1216,7 @@ func TestAlterTableTruncatePartitionPreSplitRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region=1;") tk.MustExec("use test;") tk.MustExec("drop table if exists t1;") @@ -1707,7 +1707,7 @@ func TestGlobalIndexShowTableRegions(t *testing.T) { tk.MustExec("set tidb_enable_global_index=default") }() tk.MustExec("drop table if exists p") - tk.MustExec("set @@global.tidb_scatter_region = on") + tk.MustExec("set @@session.tidb_scatter_region = on") tk.MustExec(`create table p (id int, c int, d int, unique key uidx(c)) partition by range (c) ( partition p0 values less than (4), partition p1 values less than (7), diff --git a/pkg/ddl/tests/serial/serial_test.go b/pkg/ddl/tests/serial/serial_test.go index 2004c65fe6823..57ca2273400f1 100644 --- a/pkg/ddl/tests/serial/serial_test.go +++ b/pkg/ddl/tests/serial/serial_test.go @@ -156,7 +156,7 @@ func TestCreateTableWithLike(t *testing.T) { // Test create table like for partition table. atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) tk.MustExec("use test") - tk.MustExec("set @@global.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region=1") tk.MustExec("drop table if exists partition_t") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") tk.MustExec("drop table if exists t1") @@ -1108,7 +1108,7 @@ func TestAutoRandomWithPreSplitRegion(t *testing.T) { origin := atomic.LoadUint32(&ddl.EnableSplitTableRegion) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, origin) - tk.MustExec("set @@global.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region=1") // Test pre-split table region for auto_random table. 
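+	// pre_split_regions=2 requests 2^2 = 4 regions for the auto_random table;
+	// with tidb_scatter_region set, the DDL waits for the scatter to finish.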
tk.MustExec("create table t (a bigint auto_random(2) primary key clustered, b int) pre_split_regions=2") diff --git a/pkg/executor/executor_failpoint_test.go b/pkg/executor/executor_failpoint_test.go index bd823ffa2c14d..d68401ef410d6 100644 --- a/pkg/executor/executor_failpoint_test.go +++ b/pkg/executor/executor_failpoint_test.go @@ -210,7 +210,7 @@ func TestSplitRegionTimeout(t *testing.T) { // Test pre-split with timeout. tk.MustExec("drop table if exists t") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region=1;") require.NoError(t, failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`)) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) start := time.Now() diff --git a/pkg/executor/sample_test.go b/pkg/executor/sample_test.go index bd3a477807ec2..cbc4ae39aaad6 100644 --- a/pkg/executor/sample_test.go +++ b/pkg/executor/sample_test.go @@ -32,7 +32,7 @@ func createSampleTestkit(t *testing.T, store kv.Storage) *testkit.TestKit { tk.MustExec("drop database if exists test_table_sample;") tk.MustExec("create database test_table_sample;") tk.MustExec("use test_table_sample;") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region=1;") return tk } diff --git a/pkg/executor/test/showtest/show_test.go b/pkg/executor/test/showtest/show_test.go index 7d57dbcb33e24..3e62de8682d67 100644 --- a/pkg/executor/test/showtest/show_test.go +++ b/pkg/executor/test/showtest/show_test.go @@ -807,7 +807,7 @@ func TestAutoRandomWithLargeSignedShowTableRegions(t *testing.T) { tk.MustExec("drop table if exists t;") tk.MustExec("create table t (a bigint unsigned auto_random primary key clustered);") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region=1;") // 18446744073709541615 is MaxUint64 - 10000. // 18446744073709551615 is the MaxUint64. tk.MustQuery("split table t between (18446744073709541615) and (18446744073709551615) regions 2;"). diff --git a/pkg/executor/test/splittest/split_table_test.go b/pkg/executor/test/splittest/split_table_test.go index 339fc029d5c9d..d5c498ba94bd0 100644 --- a/pkg/executor/test/splittest/split_table_test.go +++ b/pkg/executor/test/splittest/split_table_test.go @@ -224,7 +224,7 @@ func TestShowTableRegion(t *testing.T) { // Test show table regions for partition table when enable split region when create table. 
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region=1;") tk.MustExec("drop table if exists partition_t;") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") re = tk.MustQuery("show table partition_t regions") From 65f01a8d12c6fea9cfd897be90e62e64092a515d Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 12:01:26 +0800 Subject: [PATCH 08/28] fix test --- br/pkg/restore/snap_client/systable_restore_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go index 608fa7f57392f..8fd0c868a30de 100644 --- a/br/pkg/restore/snap_client/systable_restore_test.go +++ b/br/pkg/restore/snap_client/systable_restore_test.go @@ -116,5 +116,5 @@ func TestCheckSysTableCompatibility(t *testing.T) { // // The above variables are in the file br/pkg/restore/systable_restore.go func TestMonitorTheSystemTableIncremental(t *testing.T) { - require.Equal(t, int64(213), session.CurrentBootstrapVersion) + require.Equal(t, int64(214), session.CurrentBootstrapVersion) } From 886e679be1fb6b943a3c01feb09c706355bb91d0 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:10:06 +0800 Subject: [PATCH 09/28] fix test --- pkg/ddl/table_split_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go index 6fe1878e77f81..5c65bcdb08894 100644 --- a/pkg/ddl/table_split_test.go +++ b/pkg/ddl/table_split_test.go @@ -45,7 +45,7 @@ func TestTableSplit(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") // Synced split table region. 
- tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set @@session.tidb_scatter_region = 1") tk.MustExec(`create table t_part (a int key) partition by range(a) ( partition p0 values less than (10), partition p1 values less than (20) From 673936da64cf943b37d7948b18199a156c4ed556 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:49:36 +0800 Subject: [PATCH 10/28] fix test --- pkg/ddl/tests/partition/db_partition_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/ddl/tests/partition/db_partition_test.go b/pkg/ddl/tests/partition/db_partition_test.go index b46a92f3fea31..689fca1299983 100644 --- a/pkg/ddl/tests/partition/db_partition_test.go +++ b/pkg/ddl/tests/partition/db_partition_test.go @@ -1707,7 +1707,7 @@ func TestGlobalIndexShowTableRegions(t *testing.T) { tk.MustExec("set tidb_enable_global_index=default") }() tk.MustExec("drop table if exists p") - tk.MustExec("set @@session.tidb_scatter_region = on") + tk.MustExec("set @@session.tidb_scatter_region = 1") tk.MustExec(`create table p (id int, c int, d int, unique key uidx(c)) partition by range (c) ( partition p0 values less than (4), partition p1 values less than (7), From 5979ef6cddebca5d957fdc43133bd1c752d294e1 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Fri, 20 Sep 2024 15:25:18 +0800 Subject: [PATCH 11/28] fix test --- pkg/executor/set_test.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/pkg/executor/set_test.go b/pkg/executor/set_test.go index 4f11f7621230f..6b2af1008c28b 100644 --- a/pkg/executor/set_test.go +++ b/pkg/executor/set_test.go @@ -382,13 +382,18 @@ func TestSetVar(t *testing.T) { tk.MustQuery(`select @@session.tidb_wait_split_region_finish;`).Check(testkit.Rows("0")) // test for tidb_scatter_region - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("0")) - tk.MustExec("set global tidb_scatter_region = 1") - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("1")) - tk.MustExec("set global tidb_scatter_region = 0") - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("0")) - require.Error(t, tk.ExecToErr("set session tidb_scatter_region = 0")) - require.Error(t, tk.ExecToErr(`select @@session.tidb_scatter_region;`)) + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("")) + tk.MustExec("set global tidb_scatter_region = 'table'") + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("table")) + tk.MustExec("set global tidb_scatter_region = 'global'") + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("global")) + tk.MustExec("set session tidb_scatter_region = ''") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("")) + tk.MustExec("set session tidb_scatter_region = 'table'") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("table")) + tk.MustExec("set session tidb_scatter_region = 'global'") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("global")) + require.Error(t, tk.ExecToErr("set session tidb_scatter_region = 'test'")) // test for tidb_wait_split_region_timeout tk.MustQuery(`select @@session.tidb_wait_split_region_timeout;`).Check(testkit.Rows(strconv.Itoa(variable.DefWaitSplitRegionTimeout))) From c52183858652d61fe7eee9881e888f13d995400b Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang Date: Mon, 23 Sep 
2024 10:16:46 +0800 Subject: [PATCH 12/28] fix warn log Co-authored-by: Benjamin2037 --- pkg/ddl/executor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 2bb3e0c5336cc..f7ab869f15ac2 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -1387,7 +1387,7 @@ func preSplitAndScatter(ctx sessionctx.Context, store kv.Storage, tbInfo *model. ) val, ok := ctx.GetSessionVars().GetSystemVar(variable.TiDBScatterRegion) if !ok { - logutil.DDLLogger().Warn("won't scatter region") + logutil.DDLLogger().Warn("get system variable met problem, won't scatter region") } else { scope = scatterScope(val) } From 5eeeba97d986a6d4fd975dd721dd489af5464274 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Mon, 23 Sep 2024 10:50:16 +0800 Subject: [PATCH 13/28] refactor --- pkg/ddl/split_region.go | 51 +++++++++++++---------------------------- 1 file changed, 16 insertions(+), 35 deletions(-) diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index 1bb869bc59c35..822138eadfebc 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -73,6 +73,20 @@ func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo * } } +// PD controls the scope of scatter region through `tID`. +// 1. nil means PD scatter region at cluster level. +// 2. not nil means PD scatter region at table level when `scatter` is true. +func getScatterConfig(scope scatterScope, tableID int64) (scatter bool, tID *int64) { + switch scope { + case scatterTable: + return true, &tableID + case scatterGlobal: + return true, nil + default: + return false, &tableID + } +} + func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope scatterScope) []uint64 { // Example: // sharding_bits = 4 @@ -115,24 +129,7 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID)) splitTableKeys = append(splitTableKeys, key) } - var ( - err error - scatter bool - // PD controls the scope of scatter region through `tableId`. - // `nil` indicates that PD will scatter region at cluster level. - tableID *int64 - ) - switch scatterScope { - case scatterTable: - scatter = true - tableID = &tbInfo.ID - case scatterGlobal: - scatter = true - tableID = nil - default: - scatter = false - tableID = &tbInfo.ID - } + scatter, tableID := getScatterConfig(scatterScope, tbInfo.ID) regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, tableID) if err != nil { logutil.DDLLogger().Warn("pre split some table regions failed", @@ -145,23 +142,7 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS // SplitRecordRegion is to split region in store by table prefix. func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scope scatterScope) uint64 { tableStartKey := tablecodec.GenTablePrefix(physicalTableID) - var ( - scatter bool - // PD controls the scope of scatter region through `tID`. - // `nil` indicates that PD will scatter region at cluster level. 
-		tID *int64
-	)
-	switch scope {
-	case scatterTable:
-		scatter = true
-		tID = &tableID
-	case scatterGlobal:
-		scatter = true
-		tID = nil
-	default:
-		scatter = false
-		tID = &tableID
-	}
+	scatter, tID := getScatterConfig(scope, tableID)
 	regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, tID)
 	if err != nil {
 		// It will be automatically split by TiKV later.
 		logutil.DDLLogger().Warn("split table region failed", zap.Error(err))

From 34e1e7a73ac8e9ce60f53d6f6c19753be868d5ac Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 11:52:33 +0800
Subject: [PATCH 14/28] add comment

---
 pkg/ddl/partition.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go
index c76dab71cfe04..66ce54b788131 100644
--- a/pkg/ddl/partition.go
+++ b/pkg/ddl/partition.go
@@ -3229,7 +3229,8 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo
 	// Doing the preSplitAndScatter here, since all checks are completed,
 	// and we will soon start writing to the new partitions.
 	if s, ok := jobCtx.store.(kv.SplittableStore); ok && s != nil {
-		// partInfo only contains the AddingPartitions
+		// 1. partInfo only contains the AddingPartitions
+		// 2. scatterTable makes all newly split regions wait for the scatter to finish at table level.
 		splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, scatterTable)
 	}

From ed32c70ea7220efed4142c96f29cfbebdc2b243b Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 12:29:28 +0800
Subject: [PATCH 15/28] add `tidb_scatter_region` at global level test

---
 pkg/ddl/table_split_test.go | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index 5c65bcdb08894..ad151df589a48 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -50,6 +50,14 @@ func TestTableSplit(t *testing.T) {
 		partition p0 values less than (10),
 		partition p1 values less than (20)
 	)`)
+	tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows(""))
+	tk.MustExec("set @@global.tidb_scatter_region = 'table'")
+	tk = testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec(`create table t_part_2 (a int key) partition by range(a) (
+		partition p0 values less than (10),
+		partition p1 values less than (20)
+	)`)
 	defer dom.Close()
 	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
 	infoSchema := dom.InfoSchema()
@@ -57,7 +65,6 @@ func TestTableSplit(t *testing.T) {
 	tbl, err := infoSchema.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("tidb"))
 	require.NoError(t, err)
 	checkRegionStartWithTableID(t, tbl.Meta().ID, store.(kvStore))
-
 	tbl, err = infoSchema.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_part"))
 	require.NoError(t, err)
 	pi := tbl.Meta().GetPartitionInfo()
@@ -65,6 +72,13 @@ func TestTableSplit(t *testing.T) {
 	for _, def := range pi.Definitions {
 		checkRegionStartWithTableID(t, def.ID, store.(kvStore))
 	}
+	tbl, err = infoSchema.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_part_2"))
+	require.NoError(t, err)
+	pi = tbl.Meta().GetPartitionInfo()
+	require.NotNil(t, pi)
+	for _, def := range pi.Definitions {
+		checkRegionStartWithTableID(t, def.ID, store.(kvStore))
+	}
 }

From eb0e81a45be5535572552beeae84d463fc1ebd36 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 12:30:55 +0800
Subject: [PATCH 16/28] revert

---
 pkg/ddl/table_split_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index ad151df589a48..813c32a663eab 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -65,6 +65,7 @@ func TestTableSplit(t *testing.T) {
 	tbl, err := infoSchema.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("tidb"))
 	require.NoError(t, err)
 	checkRegionStartWithTableID(t, tbl.Meta().ID, store.(kvStore))
+
 	tbl, err = infoSchema.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_part"))
 	require.NoError(t, err)
 	pi := tbl.Meta().GetPartitionInfo()

From 318cb179db3a138fa34870a8bb1352565098e92f Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 16:34:40 +0800
Subject: [PATCH 17/28] validate `tidb_scatter_region` in PossibleValues

---
 pkg/sessionctx/variable/sysvar.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go
index 5d673e3a6c38d..ecb9a8f74d542 100644
--- a/pkg/sessionctx/variable/sysvar.go
+++ b/pkg/sessionctx/variable/sysvar.go
@@ -775,7 +775,7 @@ var defaultSysVars = []*SysVar{
 		SetMaxDeltaSchemaCount(TidbOptInt64(val, DefTiDBMaxDeltaSchemaCount))
 		return nil
 	}},
-	{Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{"", "table", "global"}, Type: TypeEnum,
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{"", "table", "global"}, Type: TypeStr,
 		SetSession: func(vars *SessionVars, val string) error {
 			vars.ScatterRegion = val
 			return nil
@@ -783,6 +783,12 @@ var defaultSysVars = []*SysVar{
 		GetSession: func(vars *SessionVars) (string, error) {
 			return vars.ScatterRegion, nil
 		},
+		Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
+			if normalizedValue != "" && normalizedValue != "table" && normalizedValue != "global" {
+				return "", fmt.Errorf("invalid value for %s, it should be either '%s', '%s' or '%s'", normalizedValue, "", "table", "global")
+			}
+			return normalizedValue, nil
+		},
 	},
 	{Scope: ScopeGlobal, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(DefTiDBEnableStmtSummary), Type: TypeBool, AllowEmpty: true,
 		SetGlobal: func(_ context.Context, s *SessionVars, val string) error {

From 1294345452d9c5a22858b136338e5f026609dc0a Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 16:39:40 +0800
Subject: [PATCH 18/28] fix error message

---
 pkg/sessionctx/variable/sysvar.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go
index ecb9a8f74d542..0e2c336f9d4be 100644
--- a/pkg/sessionctx/variable/sysvar.go
+++ b/pkg/sessionctx/variable/sysvar.go
@@ -785,7 +785,7 @@ var defaultSysVars = []*SysVar{
 		},
 		Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
 			if normalizedValue != "" && normalizedValue != "table" && normalizedValue != "global" {
-				return "", fmt.Errorf("invalid value for %s, it should be either '%s', '%s' or '%s'", normalizedValue, "", "table", "global")
+				return "", fmt.Errorf("invalid value for '%s', it should be either '%s', '%s' or '%s'", normalizedValue, "", "table", "global")
 			}
 			return normalizedValue, nil
 		},

From e297536c58dd1a74b8c9d5936550c5e5b2d33703 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Mon, 23 Sep 2024 16:47:53 +0800
Subject: [PATCH 19/28] adapt case

---
 pkg/ddl/index_modify_test.go                    | 8 ++++----
 pkg/ddl/table_split_test.go                     | 4 ++--
 pkg/ddl/tests/partition/db_partition_test.go    | 4 ++--
 pkg/ddl/tests/serial/serial_test.go             | 4 ++--
 pkg/executor/executor_failpoint_test.go         | 2 +-
 pkg/executor/sample_test.go                     | 2 +-
 pkg/executor/test/showtest/show_test.go         | 2 +-
 pkg/executor/test/splittest/split_table_test.go | 6 +++---
 tests/integrationtest/r/executor/sample.result  | 2 +-
 tests/integrationtest/t/executor/sample.test    | 2 +-
 10 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/pkg/ddl/index_modify_test.go b/pkg/ddl/index_modify_test.go
index 395007fb4b8d4..9a5c1eed73c8d 100644
--- a/pkg/ddl/index_modify_test.go
+++ b/pkg/ddl/index_modify_test.go
@@ -180,10 +180,10 @@ func testAddIndex(t *testing.T, tp testAddIndexType, createTableSQL, idxTp strin
 	isTestPartition := (testPartition & tp) > 0
 	if isTestShardRowID {
 		atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
-		tk.MustExec("set global tidb_scatter_region = 1")
+		tk.MustExec("set global tidb_scatter_region = 'table'")
 		defer func() {
 			atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
-			tk.MustExec("set global tidb_scatter_region = 0")
+			tk.MustExec("set global tidb_scatter_region = ''")
 		}()
 	}
 	if isTestPartition {
@@ -475,10 +475,10 @@ func testAddIndexWithSplitTable(t *testing.T, createSQL, splitTableSQL string) {
 	hasAutoRandomField := len(splitTableSQL) > 0
 	if !hasAutoRandomField {
 		atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
-		tk.MustExec("set global tidb_scatter_region = 1")
+		tk.MustExec("set global tidb_scatter_region = 'table'")
 		defer func() {
 			atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
-			tk.MustExec("set global tidb_scatter_region = 0")
+			tk.MustExec("set global tidb_scatter_region = ''")
 		}()
 	}
 	tk.MustExec(createSQL)
diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index 813c32a663eab..227255506a87f 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -45,7 +45,7 @@ func TestTableSplit(t *testing.T) {
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	// Synced split table region.
- tk.MustExec("set @@session.tidb_scatter_region = 1") + tk.MustExec("set @@session.tidb_scatter_region = 'table'") tk.MustExec(`create table t_part (a int key) partition by range(a) ( partition p0 values less than (10), partition p1 values less than (20) @@ -108,7 +108,7 @@ func TestScatterRegion(t *testing.T) { tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("")) err := tk.ExecToErr("set @@tidb_scatter_region = 'test';") - require.ErrorContains(t, err, "Variable 'tidb_scatter_region' can't be set to the value of 'test'") + require.ErrorContains(t, err, "invalid value for 'test', it should be either '', 'table' or 'global'") } type kvStore interface { diff --git a/pkg/ddl/tests/partition/db_partition_test.go b/pkg/ddl/tests/partition/db_partition_test.go index 689fca1299983..55ab40bf3b95d 100644 --- a/pkg/ddl/tests/partition/db_partition_test.go +++ b/pkg/ddl/tests/partition/db_partition_test.go @@ -1216,7 +1216,7 @@ func TestAlterTableTruncatePartitionPreSplitRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@session.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") tk.MustExec("use test;") tk.MustExec("drop table if exists t1;") @@ -1707,7 +1707,7 @@ func TestGlobalIndexShowTableRegions(t *testing.T) { tk.MustExec("set tidb_enable_global_index=default") }() tk.MustExec("drop table if exists p") - tk.MustExec("set @@session.tidb_scatter_region = 1") + tk.MustExec("set @@session.tidb_scatter_region = 'table'") tk.MustExec(`create table p (id int, c int, d int, unique key uidx(c)) partition by range (c) ( partition p0 values less than (4), partition p1 values less than (7), diff --git a/pkg/ddl/tests/serial/serial_test.go b/pkg/ddl/tests/serial/serial_test.go index 57ca2273400f1..f208f188a7450 100644 --- a/pkg/ddl/tests/serial/serial_test.go +++ b/pkg/ddl/tests/serial/serial_test.go @@ -156,7 +156,7 @@ func TestCreateTableWithLike(t *testing.T) { // Test create table like for partition table. atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) tk.MustExec("use test") - tk.MustExec("set @@session.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region='table'") tk.MustExec("drop table if exists partition_t") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") tk.MustExec("drop table if exists t1") @@ -1108,7 +1108,7 @@ func TestAutoRandomWithPreSplitRegion(t *testing.T) { origin := atomic.LoadUint32(&ddl.EnableSplitTableRegion) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, origin) - tk.MustExec("set @@session.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region='table'") // Test pre-split table region for auto_random table. tk.MustExec("create table t (a bigint auto_random(2) primary key clustered, b int) pre_split_regions=2") diff --git a/pkg/executor/executor_failpoint_test.go b/pkg/executor/executor_failpoint_test.go index d68401ef410d6..dc42b18d0a3fd 100644 --- a/pkg/executor/executor_failpoint_test.go +++ b/pkg/executor/executor_failpoint_test.go @@ -210,7 +210,7 @@ func TestSplitRegionTimeout(t *testing.T) { // Test pre-split with timeout. 
tk.MustExec("drop table if exists t") - tk.MustExec("set @@session.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") require.NoError(t, failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`)) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) start := time.Now() diff --git a/pkg/executor/sample_test.go b/pkg/executor/sample_test.go index cbc4ae39aaad6..83c4ef26900ba 100644 --- a/pkg/executor/sample_test.go +++ b/pkg/executor/sample_test.go @@ -32,7 +32,7 @@ func createSampleTestkit(t *testing.T, store kv.Storage) *testkit.TestKit { tk.MustExec("drop database if exists test_table_sample;") tk.MustExec("create database test_table_sample;") tk.MustExec("use test_table_sample;") - tk.MustExec("set @@session.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") return tk } diff --git a/pkg/executor/test/showtest/show_test.go b/pkg/executor/test/showtest/show_test.go index 3e62de8682d67..2e346b7927492 100644 --- a/pkg/executor/test/showtest/show_test.go +++ b/pkg/executor/test/showtest/show_test.go @@ -807,7 +807,7 @@ func TestAutoRandomWithLargeSignedShowTableRegions(t *testing.T) { tk.MustExec("drop table if exists t;") tk.MustExec("create table t (a bigint unsigned auto_random primary key clustered);") - tk.MustExec("set @@session.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") // 18446744073709541615 is MaxUint64 - 10000. // 18446744073709551615 is the MaxUint64. tk.MustQuery("split table t between (18446744073709541615) and (18446744073709551615) regions 2;"). diff --git a/pkg/executor/test/splittest/split_table_test.go b/pkg/executor/test/splittest/split_table_test.go index d5c498ba94bd0..aade779b94e38 100644 --- a/pkg/executor/test/splittest/split_table_test.go +++ b/pkg/executor/test/splittest/split_table_test.go @@ -41,7 +41,7 @@ func TestClusterIndexShowTableRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") tk.MustExec("drop database if exists cluster_index_regions;") tk.MustExec("create database cluster_index_regions;") tk.MustExec("use cluster_index_regions;") @@ -75,7 +75,7 @@ func TestShowTableRegion(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t_regions") - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))") tk.MustGetErrMsg( @@ -224,7 +224,7 @@ func TestShowTableRegion(t *testing.T) { // Test show table regions for partition table when enable split region when create table. 
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@session.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") tk.MustExec("drop table if exists partition_t;") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") re = tk.MustQuery("show table partition_t regions") diff --git a/tests/integrationtest/r/executor/sample.result b/tests/integrationtest/r/executor/sample.result index f7a0bcaae3dae..013d426567e15 100644 --- a/tests/integrationtest/r/executor/sample.result +++ b/tests/integrationtest/r/executor/sample.result @@ -1,4 +1,4 @@ -set @@global.tidb_scatter_region=1 +set @@global.tidb_scatter_region='table' drop table if exists t; set tidb_enable_clustered_index = on; create table t (a varchar(255) primary key, b bigint); diff --git a/tests/integrationtest/t/executor/sample.test b/tests/integrationtest/t/executor/sample.test index 0b73a13be795a..6c270d2ee5b5b 100644 --- a/tests/integrationtest/t/executor/sample.test +++ b/tests/integrationtest/t/executor/sample.test @@ -1,4 +1,4 @@ -set @@global.tidb_scatter_region=1 +set @@global.tidb_scatter_region='table' # TestTableSampleSchema drop table if exists t; From 1e374be7cc9665a80824f7836aa5db8f80aa40e6 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Mon, 23 Sep 2024 17:48:07 +0800 Subject: [PATCH 20/28] address comment --- pkg/ddl/split_region.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index 822138eadfebc..d7554007ab573 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -73,17 +73,16 @@ func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo * } } -// PD controls the scope of scatter region through `tID`. -// 1. nil means PD scatter region at cluster level. -// 2. not nil means PD scatter region at table level when `scatter` is true. -func getScatterConfig(scope scatterScope, tableID int64) (scatter bool, tID *int64) { +// `tID` is used to control the scope of scatter. If it is `scatterTable`, the corresponding tableID is used. +// If it is `scatterGlobal`, then the scatter configured as global uniformly use -1. +func getScatterConfig(scope scatterScope, tableID int64) (scatter bool, tID int64) { switch scope { case scatterTable: - return true, &tableID + return true, tableID case scatterGlobal: - return true, nil + return true, -1 default: - return false, &tableID + return false, tableID } } @@ -130,12 +129,12 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS splitTableKeys = append(splitTableKeys, key) } scatter, tableID := getScatterConfig(scatterScope, tbInfo.ID) - regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, tableID) + regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, &tableID) if err != nil { logutil.DDLLogger().Warn("pre split some table regions failed", zap.Stringer("table", tbInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err)) } - regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter, tableID)...) + regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter, &tableID)...) 
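+	// With the scatterGlobal scope, getScatterConfig returned -1 above; taking
+	// the address keeps the SplitRegions signature while the sentinel still
+	// marks a cluster-level scatter.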
return regionIDs } @@ -143,7 +142,7 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scope scatterScope) uint64 { tableStartKey := tablecodec.GenTablePrefix(physicalTableID) scatter, tID := getScatterConfig(scope, tableID) - regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, tID) + regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, &tID) if err != nil { // It will be automatically split by TiKV later. logutil.DDLLogger().Warn("split table region failed", zap.Error(err)) From 8765169e6d01933526cca679596bf1ad7ad679de Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:16:16 +0800 Subject: [PATCH 21/28] resolve conflict --- br/pkg/restore/snap_client/systable_restore_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go index 8fd0c868a30de..6dae14d9d6e17 100644 --- a/br/pkg/restore/snap_client/systable_restore_test.go +++ b/br/pkg/restore/snap_client/systable_restore_test.go @@ -116,5 +116,5 @@ func TestCheckSysTableCompatibility(t *testing.T) { // // The above variables are in the file br/pkg/restore/systable_restore.go func TestMonitorTheSystemTableIncremental(t *testing.T) { - require.Equal(t, int64(214), session.CurrentBootstrapVersion) + require.Equal(t, int64(215), session.CurrentBootstrapVersion) } From 4964a013ba41cc4539279815b8859ee2024bc216 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Tue, 24 Sep 2024 13:39:22 +0800 Subject: [PATCH 22/28] fix --- pkg/session/bootstrap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index 998c781d3dae3..164e727ad0ccd 100644 --- a/pkg/session/bootstrap.go +++ b/pkg/session/bootstrap.go @@ -1167,7 +1167,6 @@ const ( // create `mysql.index_advisor_results` table version214 = 214 - // version 215 // changes variable `tidb_scatter_region` value from ON to "table" and OFF to "". version215 = 215 @@ -3193,7 +3192,7 @@ func upgradeToVer214(s sessiontypes.Session, ver int64) { } func upgradeToVer215(s sessiontypes.Session, ver int64) { - if ver >= version214 { + if ver >= version215 { return } From 9928422f6386db3bc12d4a0d7a5f39263377250b Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:19:59 +0800 Subject: [PATCH 23/28] address comment --- pkg/ddl/executor.go | 12 ++++----- pkg/ddl/split_region.go | 37 +++++++++++----------------- pkg/session/session.go | 2 +- pkg/sessionctx/variable/session.go | 9 +++++++ pkg/sessionctx/variable/sysvar.go | 6 ++--- pkg/sessionctx/variable/tidb_vars.go | 2 +- 6 files changed, 35 insertions(+), 33 deletions(-) diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 61c16aaf85e6c..94b57ad973784 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -1382,21 +1382,21 @@ func preSplitAndScatter(ctx sessionctx.Context, store kv.Storage, tbInfo *model. 
return } var ( - preSplit func() - scope scatterScope + preSplit func() + scatterScope string ) val, ok := ctx.GetSessionVars().GetSystemVar(variable.TiDBScatterRegion) if !ok { logutil.DDLLogger().Warn("get system variable met problem, won't scatter region") } else { - scope = scatterScope(val) + scatterScope = val } if len(parts) > 0 { - preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scope) } + preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterScope) } } else { - preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scope) } + preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterScope) } } - if scope != scatterOff { + if scatterScope != variable.ScatterOff { preSplit() } else { go preSplit() diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index d7554007ab573..8c337a6b1b504 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -16,6 +16,7 @@ package ddl import ( "context" + "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl/logutil" @@ -30,15 +31,7 @@ import ( "go.uber.org/zap" ) -type scatterScope string - -const ( - scatterOff scatterScope = "" - scatterTable scatterScope = "table" - scatterGlobal scatterScope = "global" -) - -func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope scatterScope) { +func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope string) { // Max partition count is 8192, should we sample and just choose some partitions to split? regionIDs := make([]uint64, 0, len(parts)) ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) @@ -53,40 +46,40 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatterScope)) } } - if scatterScope != scatterOff { + if scatterScope != variable.ScatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scope scatterScope) { +func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatterScope string) { ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) var regionIDs []uint64 if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { - regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scope) + regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatterScope) } else { - regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scope)) + regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatterScope)) } - if scope != scatterOff { + if scatterScope != variable.ScatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -// `tID` is used to control the scope of scatter. If it is `scatterTable`, the corresponding tableID is used. -// If it is `scatterGlobal`, then the scatter configured as global uniformly use -1. 
-func getScatterConfig(scope scatterScope, tableID int64) (scatter bool, tID int64) { +// `tID` is used to control the scope of scatter. If it is `ScatterTable`, the corresponding tableID is used. +// If it is `ScatterGlobal`, then the scatter configured as global uniformly use -1. +func getScatterConfig(scope string, tableID int64) (scatter bool, tID int64) { switch scope { - case scatterTable: + case variable.ScatterTable: return true, tableID - case scatterGlobal: + case variable.ScatterGlobal: return true, -1 default: return false, tableID } } -func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope scatterScope) []uint64 { +func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope string) []uint64 { // Example: // sharding_bits = 4 // PreSplitRegions = 2 @@ -139,9 +132,9 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS } // SplitRecordRegion is to split region in store by table prefix. -func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scope scatterScope) uint64 { +func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64 { tableStartKey := tablecodec.GenTablePrefix(physicalTableID) - scatter, tID := getScatterConfig(scope, tableID) + scatter, tID := getScatterConfig(scatterScope, tableID) regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, &tID) if err != nil { // It will be automatically split by TiKV later. diff --git a/pkg/session/session.go b/pkg/session/session.go index a5b06f23a55ec..61b6aa8234d98 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -3211,7 +3211,7 @@ func splitAndScatterTable(store kv.Storage, tableIDs []int64) { for _, id := range tableIDs { regionIDs = append(regionIDs, ddl.SplitRecordRegion(ctxWithTimeout, s, id, id, variable.DefTiDBScatterRegion)) } - if variable.DefTiDBScatterRegion != "" { + if variable.DefTiDBScatterRegion != variable.ScatterOff { ddl.WaitScatterRegionFinish(ctxWithTimeout, s, regionIDs...) 
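
(Review aside, outside the diff.) The preSplitAndScatter dispatch earlier in this patch is the behavioral core of the new variable: any scope other than ScatterOff runs the pre-split synchronously so the DDL can wait for scattering, while ScatterOff keeps the old fire-and-forget goroutine. A minimal sketch of that control flow, assuming only the empty-string off value:

    package main

    import (
        "fmt"
        "time"
    )

    const scatterOff = "" // assumed to match variable.ScatterOff

    // dispatchPreSplit sketches the tail of preSplitAndScatter: with a
    // scatter scope set, the pre-split runs synchronously so the caller can
    // wait for the scatter; with scatter off it stays a background goroutine.
    func dispatchPreSplit(scatterScope string, preSplit func()) {
        if scatterScope != scatterOff {
            preSplit()
        } else {
            go preSplit()
        }
    }

    func main() {
        dispatchPreSplit("table", func() { fmt.Println("ran synchronously") })
        dispatchPreSplit(scatterOff, func() { fmt.Println("ran in background") })
        time.Sleep(100 * time.Millisecond) // let the background case print before exit
    }

This matches the split helpers above, which call WaitScatterRegionFinish only when the scope is not ScatterOff.
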
} cancel() diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go index beab458cdc31f..f981d9a40c562 100644 --- a/pkg/sessionctx/variable/session.go +++ b/pkg/sessionctx/variable/session.go @@ -4021,3 +4021,12 @@ func (s *SessionVars) PessimisticLockEligible() bool { } return false } + +const ( + // ScatterOff means default, will not scatter region + ScatterOff string = "" + // ScatterTable means scatter region at table level + ScatterTable string = "table" + // ScatterGlobal means scatter region at global level + ScatterGlobal string = "global" +) diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go index f027d7ba657b9..bceb4f078b4ec 100644 --- a/pkg/sessionctx/variable/sysvar.go +++ b/pkg/sessionctx/variable/sysvar.go @@ -775,7 +775,7 @@ var defaultSysVars = []*SysVar{ SetMaxDeltaSchemaCount(TidbOptInt64(val, DefTiDBMaxDeltaSchemaCount)) return nil }}, - {Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{"", "table", "global"}, Type: TypeStr, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{ScatterOff, ScatterTable, ScatterGlobal}, Type: TypeStr, SetSession: func(vars *SessionVars, val string) error { vars.ScatterRegion = val return nil @@ -784,8 +784,8 @@ var defaultSysVars = []*SysVar{ return vars.ScatterRegion, nil }, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) { - if normalizedValue != "" && normalizedValue != "table" && normalizedValue != "global" { - return "", fmt.Errorf("invalid value for '%s', it should be either '%s', '%s' or '%s'", normalizedValue, "", "table", "global") + if normalizedValue != ScatterOff && normalizedValue != ScatterTable && normalizedValue != ScatterGlobal { + return "", fmt.Errorf("invalid value for '%s', it should be either '%s', '%s' or '%s'", normalizedValue, ScatterOff, ScatterTable, ScatterGlobal) } return normalizedValue, nil }, diff --git a/pkg/sessionctx/variable/tidb_vars.go b/pkg/sessionctx/variable/tidb_vars.go index 8146c6ca6b8e4..14a5f1460bd28 100644 --- a/pkg/sessionctx/variable/tidb_vars.go +++ b/pkg/sessionctx/variable/tidb_vars.go @@ -1338,7 +1338,7 @@ const ( DefTiDBSkipIsolationLevelCheck = false DefTiDBExpensiveQueryTimeThreshold = 60 // 60s DefTiDBExpensiveTxnTimeThreshold = 60 * 10 // 10 minutes - DefTiDBScatterRegion = "" + DefTiDBScatterRegion = ScatterOff DefTiDBWaitSplitRegionFinish = true DefWaitSplitRegionTimeout = 300 // 300s DefTiDBEnableNoopFuncs = Off From ca180a3cf7a2213cf4e9b740a35b3b5b999dd10e Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:21:17 +0800 Subject: [PATCH 24/28] address comment --- pkg/ddl/partition.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index 1c5a0db98ce77..f05a435202c56 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -3231,8 +3231,8 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo // and we will soon start writing to the new partitions. if s, ok := jobCtx.store.(kv.SplittableStore); ok && s != nil { // 1. partInfo only contains the AddingPartitions - // 2. scatterTable control all new split region need waiting for scatter region finish at table level. - splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, scatterTable) + // 2. 
ScatterTable makes all newly split regions wait for the region scatter to finish at the table level.
+		splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, variable.ScatterTable)
 	}
 
 	// Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that.

From df2b7eead3bb579bcb3f4902abe1cdb0b0a76a43 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Tue, 24 Sep 2024 15:38:03 +0800
Subject: [PATCH 25/28] address comment

---
 pkg/ddl/split_region.go     | 2 +-
 pkg/ddl/table_split_test.go | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go
index 8c337a6b1b504..0329ec326eddd 100644
--- a/pkg/ddl/split_region.go
+++ b/pkg/ddl/split_region.go
@@ -16,7 +16,6 @@ package ddl
 
 import (
 	"context"
-	"github.com/pingcap/tidb/pkg/sessionctx/variable"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/pkg/ddl/logutil"
@@ -25,6 +24,7 @@ import (
 	"github.com/pingcap/tidb/pkg/meta/model"
 	"github.com/pingcap/tidb/pkg/parser/mysql"
 	"github.com/pingcap/tidb/pkg/sessionctx"
+	"github.com/pingcap/tidb/pkg/sessionctx/variable"
 	"github.com/pingcap/tidb/pkg/tablecodec"
 	"github.com/pingcap/tidb/pkg/types"
 	tikverr "github.com/tikv/client-go/v2/error"
diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index 227255506a87f..c293be9c16acc 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -109,6 +109,10 @@ func TestScatterRegion(t *testing.T) {
 
 	err := tk.ExecToErr("set @@tidb_scatter_region = 'test';")
 	require.ErrorContains(t, err, "invalid value for 'test', it should be either '', 'table' or 'global'")
+	err = tk.ExecToErr("set @@tidb_scatter_region = '1';")
+	require.ErrorContains(t, err, "invalid value for '1', it should be either '', 'table' or 'global'")
+	err = tk.ExecToErr("set @@tidb_scatter_region = 0;")
+	require.ErrorContains(t, err, "invalid value for '0', it should be either '', 'table' or 'global'")
 }
 
 type kvStore interface {

From 8c36330a92a72ae858d312777597800f059e44d6 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Tue, 24 Sep 2024 15:41:21 +0800
Subject: [PATCH 26/28] fix comment

---
 pkg/ddl/split_region.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go
index 0329ec326eddd..5152197279ff5 100644
--- a/pkg/ddl/split_region.go
+++ b/pkg/ddl/split_region.go
@@ -67,7 +67,7 @@ func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *
 }
 
 // `tID` is used to control the scope of scatter. If it is `ScatterTable`, the corresponding tableID is used.
-// If it is `ScatterGlobal`, then the scatter configured as global uniformly use -1.
+// If it is `ScatterGlobal`, the scatter configured at global level uniformly uses -1 as `tID`.
 func getScatterConfig(scope string, tableID int64) (scatter bool, tID int64) {
 	switch scope {
 	case variable.ScatterTable:

From 1f3e2aed4536167981d2ecb03549fb8841ecc245 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Tue, 24 Sep 2024 16:16:28 +0800
Subject: [PATCH 27/28] add comment to describe the testcase (TestScatterRegion) target

---
 pkg/ddl/table_split_test.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go
index c293be9c16acc..7ee9ebec021c4 100644
--- a/pkg/ddl/table_split_test.go
+++ b/pkg/ddl/table_split_test.go
@@ -82,6 +82,11 @@ func TestTableSplit(t *testing.T) {
 	}
 }
 
+// TestScatterRegion tests the behavior of the tidb_scatter_region system variable, verifying that:
+// 1. The variable can be set and queried correctly at both session and global levels.
+// 2. Changes to the global variable affect new sessions but not existing ones.
+// 3. The variable only accepts valid values ('', 'table', 'global').
+// 4. Attempts to set invalid values result in appropriate error messages.
 func TestScatterRegion(t *testing.T) {
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)

From 7a160587b80fdb4cb0fc74587b5f9666b0487f93 Mon Sep 17 00:00:00 2001
From: Jiaqiang Huang <96465211+River2000i@users.noreply.github.com>
Date: Wed, 25 Sep 2024 11:03:01 +0800
Subject: [PATCH 28/28] resolve conflict

---
 br/pkg/restore/snap_client/systable_restore_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go
index 6dae14d9d6e17..8b1b464023af0 100644
--- a/br/pkg/restore/snap_client/systable_restore_test.go
+++ b/br/pkg/restore/snap_client/systable_restore_test.go
@@ -116,5 +116,5 @@ func TestCheckSysTableCompatibility(t *testing.T) {
 //
 // The above variables are in the file br/pkg/restore/systable_restore.go
 func TestMonitorTheSystemTableIncremental(t *testing.T) {
-	require.Equal(t, int64(215), session.CurrentBootstrapVersion)
+	require.Equal(t, int64(216), session.CurrentBootstrapVersion)
 }
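
(Review aside.) End to end, the series turns tidb_scatter_region from a boolean into a three-valued setting ('', 'table', 'global'), with the version215 bootstrap upgrade rewriting stored ON/OFF values to 'table'/''. A hedged client-side sketch of the new values in use; the DSN, driver choice, and a running TiDB built with this series are assumptions, not part of the patch:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/go-sql-driver/mysql" // TiDB speaks the MySQL wire protocol
    )

    func main() {
        // Placeholder DSN; adjust host/port for your deployment.
        db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // The variable now takes '', 'table', or 'global' instead of ON/OFF.
        if _, err := db.Exec("SET @@session.tidb_scatter_region = 'global'"); err != nil {
            log.Fatal(err)
        }

        var val string
        if err := db.QueryRow("SELECT @@session.tidb_scatter_region").Scan(&val); err != nil {
            log.Fatal(err)
        }
        fmt.Println("tidb_scatter_region =", val) // expect: global
    }

Anything outside the enum is rejected with the "invalid value" error exercised by TestScatterRegion above.
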