Commit d92dce0 (1 parent: db2776a)

ttl: reduce some warnings logs when locking TTL tasks (#58306)

close #58305

4 files changed (+42 -34 lines)

pkg/ttl/ttlworker/job_manager_test.go

Lines changed: 10 additions & 1 deletion

@@ -33,6 +33,7 @@ import (
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/tikv/client-go/v2/testutils"
 	"github.com/tikv/client-go/v2/tikv"
 	"github.com/tikv/client-go/v2/tikvrpc"
 )
@@ -673,10 +674,18 @@ func TestLocalJobs(t *testing.T) {
 }
 
 func TestSplitCnt(t *testing.T) {
+	mockClient, _, pdClient, err := testutils.NewMockTiKV("", nil)
+	require.NoError(t, err)
+	defer func() {
+		pdClient.Close()
+		err = mockClient.Close()
+		require.NoError(t, err)
+	}()
+
 	require.Equal(t, 64, getScanSplitCnt(nil))
 	require.Equal(t, 64, getScanSplitCnt(&mockKVStore{}))
 
-	s := &mockTiKVStore{regionCache: tikv.NewRegionCache(nil)}
+	s := &mockTiKVStore{regionCache: tikv.NewRegionCache(pdClient)}
 	for i := uint64(1); i <= 128; i++ {
 		s.GetRegionCache().SetRegionCacheStore(i, "", "", tikvrpc.TiKV, 1, nil)
 		if i <= 64 {
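Note: the change above swaps the nil PD client previously passed to tikv.NewRegionCache for a mock one from testutils.NewMockTiKV, presumably because the region cache needs a usable PD client and nil is no longer safe to pass. A minimal, self-contained sketch of the new setup/teardown pattern; it assumes only the client-go calls visible in this diff, and the test name is invented for illustration:

```go
package ttlworker_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tikv/client-go/v2/testutils"
	"github.com/tikv/client-go/v2/tikv"
	"github.com/tikv/client-go/v2/tikvrpc"
)

// TestRegionCacheSetup is an illustrative name, not part of this commit.
func TestRegionCacheSetup(t *testing.T) {
	// Build an in-memory mock TiKV cluster and take its PD client.
	mockClient, _, pdClient, err := testutils.NewMockTiKV("", nil)
	require.NoError(t, err)
	defer func() {
		// Tear down in the same order the tests in this commit use.
		pdClient.Close()
		require.NoError(t, mockClient.Close())
	}()

	// The region cache is backed by the mock PD client instead of nil.
	rc := tikv.NewRegionCache(pdClient)
	defer rc.Close()

	// Register a fake TiKV store, as TestSplitCnt does above.
	rc.SetRegionCacheStore(1, "", "", tikvrpc.TiKV, 1, nil)
}
```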

pkg/ttl/ttlworker/task_manager.go

Lines changed: 9 additions & 0 deletions

@@ -309,6 +309,15 @@ func (m *taskManager) rescheduleTasks(se session.Session, now time.Time) {
 		return
 	}
 
+	if len(tasks) == 0 {
+		return
+	}
+
+	err = m.infoSchemaCache.Update(se)
+	if err != nil {
+		logutil.Logger(m.ctx).Warn("fail to update infoSchemaCache", zap.Error(err))
+		return
+	}
 loop:
 	for _, t := range tasks {
 		logger := logutil.Logger(m.ctx).With(
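This hunk is the substance of the fix (#58305): rescheduleTasks previously relied on its callers keeping infoSchemaCache fresh, and a stale cache could produce warning logs while locking TTL tasks. It now returns early when there is nothing to schedule and refreshes the cache itself just before the locking loop. The added logic again, with explanatory comments (all identifiers appear in the hunk above):

```go
// With no tasks to lock there is nothing to do; skip the cache refresh
// entirely.
if len(tasks) == 0 {
	return
}

// Refresh the schema cache immediately before locking, so tables that
// changed since the last refresh no longer trigger per-task warnings.
err = m.infoSchemaCache.Update(se)
if err != nil {
	logutil.Logger(m.ctx).Warn("fail to update infoSchemaCache", zap.Error(err))
	return
}
```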

pkg/ttl/ttlworker/task_manager_integration_test.go

Lines changed: 13 additions & 32 deletions

@@ -130,8 +130,6 @@ func TestParallelSchedule(t *testing.T) {
 		sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, i)
 		tk.MustExec(sql)
 	}
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(sessionFactory()))
 	scheduleWg := sync.WaitGroup{}
 	finishTasks := make([]func(), 0, 4)
 	for i := 0; i < 4; i++ {
@@ -143,7 +141,7 @@ func TestParallelSchedule(t *testing.T) {
 		}
 
 		managerID := fmt.Sprintf("task-manager-%d", i)
-		m := ttlworker.NewTaskManager(context.Background(), nil, isc, managerID, store)
+		m := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Second), managerID, store)
 		m.SetScanWorkers4Test(workers)
 		scheduleWg.Add(1)
 		go func() {
@@ -187,14 +185,10 @@ func TestTaskScheduleExpireHeartBeat(t *testing.T) {
 	sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, 1)
 	tk.MustExec(sql)
 
-	// update the infoschema cache
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(sessionFactory()))
-
 	// schedule in a task manager
 	scanWorker := ttlworker.NewMockScanWorker(t)
 	scanWorker.Start()
-	m := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-1", store)
+	m := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Second), "task-manager-1", store)
 	m.SetScanWorkers4Test([]ttlworker.Worker{scanWorker})
 	se := sessionFactory()
 	now := se.Now()
@@ -204,7 +198,7 @@ func TestTaskScheduleExpireHeartBeat(t *testing.T) {
 	// another task manager should fetch this task after heartbeat expire
 	scanWorker2 := ttlworker.NewMockScanWorker(t)
 	scanWorker2.Start()
-	m2 := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-2", store)
+	m2 := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Second), "task-manager-2", store)
 	m2.SetScanWorkers4Test([]ttlworker.Worker{scanWorker2})
 	m2.RescheduleTasks(sessionFactory(), now.Add(time.Hour))
 	tk.MustQuery("select status,owner_id from mysql.tidb_ttl_task").Check(testkit.Rows("running task-manager-2"))
@@ -215,7 +209,7 @@ func TestTaskScheduleExpireHeartBeat(t *testing.T) {
 	m2.CheckFinishedTask(sessionFactory(), now)
 	scanWorker3 := ttlworker.NewMockScanWorker(t)
 	scanWorker3.Start()
-	m3 := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-3", store)
+	m3 := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Second), "task-manager-3", store)
 	m3.SetScanWorkers4Test([]ttlworker.Worker{scanWorker3})
 	m3.RescheduleTasks(sessionFactory(), now.Add(time.Hour))
 	tk.MustQuery("select status,owner_id from mysql.tidb_ttl_task").Check(testkit.Rows("finished task-manager-2"))
@@ -235,14 +229,10 @@ func TestTaskMetrics(t *testing.T) {
 	sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, 1)
 	tk.MustExec(sql)
 
-	// update the infoschema cache
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(sessionFactory()))
-
 	// schedule in a task manager
 	scanWorker := ttlworker.NewMockScanWorker(t)
 	scanWorker.Start()
-	m := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-1", store)
+	m := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Minute), "task-manager-1", store)
 	m.SetScanWorkers4Test([]ttlworker.Worker{scanWorker})
 	se := sessionFactory()
 	now := se.Now()
@@ -268,13 +258,11 @@ func TestRescheduleWithError(t *testing.T) {
 
 	se := sessionFactory()
 	now := se.Now()
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(se))
 
 	// schedule in a task manager
 	scanWorker := ttlworker.NewMockScanWorker(t)
 	scanWorker.Start()
-	m := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-1", store)
+	m := ttlworker.NewTaskManager(context.Background(), nil, cache.NewInfoSchemaCache(time.Minute), "task-manager-1", store)
 	m.SetScanWorkers4Test([]ttlworker.Worker{scanWorker})
 	notify := make(chan struct{})
 	go func() {
@@ -307,8 +295,7 @@ func TestTTLRunningTasksLimitation(t *testing.T) {
 		sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, i)
 		tk.MustExec(sql)
 	}
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(sessionFactory()))
+
 	scheduleWg := sync.WaitGroup{}
 	for i := 0; i < 16; i++ {
 		workers := []ttlworker.Worker{}
@@ -319,7 +306,7 @@ func TestTTLRunningTasksLimitation(t *testing.T) {
 		}
 
 		ctx := logutil.WithKeyValue(context.Background(), "ttl-worker-test", fmt.Sprintf("task-manager-%d", i))
-		m := ttlworker.NewTaskManager(ctx, nil, isc, fmt.Sprintf("task-manager-%d", i), store)
+		m := ttlworker.NewTaskManager(ctx, nil, cache.NewInfoSchemaCache(time.Minute), fmt.Sprintf("task-manager-%d", i), store)
 		m.SetScanWorkers4Test(workers)
 		scheduleWg.Add(1)
 		go func() {
@@ -384,9 +371,7 @@ func TestShrinkScanWorkerAndResignOwner(t *testing.T) {
 	se := sessionFactory()
 	now := se.Now()
 
-	isc := cache.NewInfoSchemaCache(time.Minute)
-	require.NoError(t, isc.Update(se))
-	m := ttlworker.NewTaskManager(context.Background(), pool, isc, "scan-manager-1", store)
+	m := ttlworker.NewTaskManager(context.Background(), pool, cache.NewInfoSchemaCache(time.Minute), "scan-manager-1", store)
 
 	startBlockNotifyCh := make(chan struct{})
 	blockCancelCh := make(chan struct{})
@@ -522,7 +507,7 @@ func TestShrinkScanWorkerAndResignOwner(t *testing.T) {
 	))
 
 	// A resigned task can be obtained by other task managers
-	m2 := ttlworker.NewTaskManager(context.Background(), pool, isc, "scan-manager-2", store)
+	m2 := ttlworker.NewTaskManager(context.Background(), pool, cache.NewInfoSchemaCache(time.Minute), "scan-manager-2", store)
 	worker2 := ttlworker.NewMockScanWorker(t)
 	worker2.Start()
 	defer func() {
@@ -562,8 +547,6 @@ func TestTaskCancelledAfterHeartbeatTimeout(t *testing.T) {
 		sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, i)
 		tk.MustExec(sql)
 	}
-	isc := cache.NewInfoSchemaCache(time.Second)
-	require.NoError(t, isc.Update(se))
 
 	workers := []ttlworker.Worker{}
 	for j := 0; j < 8; j++ {
@@ -573,10 +556,10 @@ func TestTaskCancelledAfterHeartbeatTimeout(t *testing.T) {
 	}
 
 	now := se.Now()
-	m1 := ttlworker.NewTaskManager(context.Background(), pool, isc, "task-manager-1", store)
+	m1 := ttlworker.NewTaskManager(context.Background(), pool, cache.NewInfoSchemaCache(time.Minute), "task-manager-1", store)
 	m1.SetScanWorkers4Test(workers[0:4])
 	m1.RescheduleTasks(se, now)
-	m2 := ttlworker.NewTaskManager(context.Background(), pool, isc, "task-manager-2", store)
+	m2 := ttlworker.NewTaskManager(context.Background(), pool, cache.NewInfoSchemaCache(time.Minute), "task-manager-2", store)
 	m2.SetScanWorkers4Test(workers[4:])
 
 	// All tasks should be scheduled to m1 and running
@@ -665,9 +648,7 @@ func TestHeartBeatErrorNotBlockOthers(t *testing.T) {
 	se := sessionFactory()
 	now := se.Now()
 
-	isc := cache.NewInfoSchemaCache(time.Minute)
-	require.NoError(t, isc.Update(se))
-	m := ttlworker.NewTaskManager(context.Background(), pool, isc, "task-manager-1", store)
+	m := ttlworker.NewTaskManager(context.Background(), pool, cache.NewInfoSchemaCache(time.Minute), "task-manager-1", store)
 	workers := []ttlworker.Worker{}
 	for j := 0; j < 4; j++ {
 		scanWorker := ttlworker.NewMockScanWorker(t)
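Because the task manager now refreshes its own cache, the integration tests drop the shared, pre-warmed isc variable; every NewTaskManager call receives a fresh cache.NewInfoSchemaCache(...) inline. The before/after pattern, condensed from the hunks above (variable names as in the tests):

```go
// Before: each test pre-warmed a shared cache by hand.
isc := cache.NewInfoSchemaCache(time.Second)
require.NoError(t, isc.Update(sessionFactory()))
m := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-1", store)

// After: pass a fresh cache inline; RescheduleTasks updates it on demand,
// so the manual Update call and the shared variable disappear.
m = ttlworker.NewTaskManager(context.Background(), nil,
	cache.NewInfoSchemaCache(time.Minute), "task-manager-1", store)
```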

pkg/ttl/ttlworker/task_manager_test.go

Lines changed: 10 additions & 1 deletion

@@ -27,6 +27,7 @@ import (
 	"github.com/pingcap/tidb/pkg/util/logutil"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/tikv/client-go/v2/testutils"
 	"github.com/tikv/client-go/v2/tikv"
 	"github.com/tikv/client-go/v2/tikvrpc"
 )
@@ -283,6 +284,14 @@ func (s *mockTiKVStore) GetRegionCache() *tikv.RegionCache {
 }
 
 func TestGetMaxRunningTasksLimit(t *testing.T) {
+	mockClient, _, pdClient, err := testutils.NewMockTiKV("", nil)
+	require.NoError(t, err)
+	defer func() {
+		pdClient.Close()
+		err = mockClient.Close()
+		require.NoError(t, err)
+	}()
+
 	variable.TTLRunningTasks.Store(1)
 	require.Equal(t, 1, getMaxRunningTasksLimit(&mockTiKVStore{}))
 
@@ -294,7 +303,7 @@ func TestGetMaxRunningTasksLimit(t *testing.T) {
 	require.Equal(t, variable.MaxConfigurableConcurrency, getMaxRunningTasksLimit(&mockKVStore{}))
 	require.Equal(t, variable.MaxConfigurableConcurrency, getMaxRunningTasksLimit(&mockTiKVStore{}))
 
-	s := &mockTiKVStore{regionCache: tikv.NewRegionCache(nil)}
+	s := &mockTiKVStore{regionCache: tikv.NewRegionCache(pdClient)}
 	s.GetRegionCache().SetRegionCacheStore(1, "", "", tikvrpc.TiKV, 1, nil)
 	s.GetRegionCache().SetRegionCacheStore(2, "", "", tikvrpc.TiKV, 1, nil)
 	s.GetRegionCache().SetRegionCacheStore(3, "", "", tikvrpc.TiFlash, 1, nil)
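The same mock-TiKV setup now appears verbatim in both unit-test files. A t.Cleanup-based helper could bundle it if the pattern spreads further; the sketch below is hypothetical (newMockRegionCache is not part of this commit) and uses only the client-go calls the diffs already rely on, with the same imports as the test files:

```go
// newMockRegionCache is a hypothetical helper, not part of this commit.
// It returns a region cache backed by a mock PD client and registers
// teardown with t.Cleanup (cleanups run in reverse registration order,
// so the cache closes before its PD client).
func newMockRegionCache(t *testing.T) *tikv.RegionCache {
	mockClient, _, pdClient, err := testutils.NewMockTiKV("", nil)
	require.NoError(t, err)
	rc := tikv.NewRegionCache(pdClient)
	t.Cleanup(func() {
		rc.Close()
		pdClient.Close()
		require.NoError(t, mockClient.Close())
	})
	return rc
}
```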
