Skip to content

Commit 4670bf5

Browse files
authored
ddl: separate sessionctx.Context in backfillCtx to sub-contexts (#53671)
ref #53388
1 parent 3fdb963 commit 4670bf5

File tree

12 files changed

+121
-73
lines changed

12 files changed

+121
-73
lines changed

pkg/ddl/backfilling.go

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ import (
2929
"github.com/pingcap/tidb/pkg/ddl/logutil"
3030
ddlutil "github.com/pingcap/tidb/pkg/ddl/util"
3131
"github.com/pingcap/tidb/pkg/expression"
32+
exprctx "github.com/pingcap/tidb/pkg/expression/context"
3233
"github.com/pingcap/tidb/pkg/kv"
3334
"github.com/pingcap/tidb/pkg/metrics"
3435
"github.com/pingcap/tidb/pkg/parser/model"
@@ -40,6 +41,7 @@ import (
4041
"github.com/pingcap/tidb/pkg/table"
4142
"github.com/pingcap/tidb/pkg/tablecodec"
4243
"github.com/pingcap/tidb/pkg/util"
44+
contextutil "github.com/pingcap/tidb/pkg/util/context"
4345
"github.com/pingcap/tidb/pkg/util/dbterror"
4446
decoder "github.com/pingcap/tidb/pkg/util/rowDecoder"
4547
"github.com/pingcap/tidb/pkg/util/topsql"
@@ -145,29 +147,44 @@ type backfillCtx struct {
145147
id int
146148
*ddlCtx
147149
sessCtx sessionctx.Context
150+
warnings contextutil.WarnHandlerExt
151+
loc *time.Location
152+
exprCtx exprctx.BuildContext
153+
tblCtx table.MutateContext
148154
schemaName string
149155
table table.Table
150156
batchCnt int
151157
jobContext *JobContext
152158
metricCounter prometheus.Counter
153159
}
154160

155-
func newBackfillCtx(ctx *ddlCtx, id int, sessCtx sessionctx.Context,
156-
schemaName string, tbl table.Table, jobCtx *JobContext, label string, isDistributed bool) *backfillCtx {
161+
func newBackfillCtx(id int, rInfo *reorgInfo,
162+
schemaName string, tbl table.Table, jobCtx *JobContext, label string, isDistributed bool) (*backfillCtx, error) {
163+
sessCtx, err := newSessCtx(rInfo.d.store, rInfo.ReorgMeta)
164+
if err != nil {
165+
return nil, err
166+
}
167+
157168
if isDistributed {
158169
id = int(backfillContextID.Add(1))
159170
}
171+
172+
exprCtx := sessCtx.GetExprCtx()
160173
return &backfillCtx{
161174
id: id,
162-
ddlCtx: ctx,
175+
ddlCtx: rInfo.d,
163176
sessCtx: sessCtx,
177+
warnings: sessCtx.GetSessionVars().StmtCtx.WarnHandler,
178+
exprCtx: exprCtx,
179+
tblCtx: sessCtx.GetTableCtx(),
180+
loc: exprCtx.GetEvalCtx().Location(),
164181
schemaName: schemaName,
165182
table: tbl,
166183
batchCnt: int(variable.GetDDLReorgBatchSize()),
167184
jobContext: jobCtx,
168185
metricCounter: metrics.BackfillTotalCounter.WithLabelValues(
169186
metrics.GenerateReorgLabel(label, schemaName, tbl.Meta().Name.String())),
170-
}
187+
}, nil
171188
}
172189

173190
func updateTxnEntrySizeLimitIfNeeded(txn kv.Transaction) {

pkg/ddl/backfilling_scheduler.go

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -274,17 +274,17 @@ func (b *txnBackfillScheduler) adjustWorkerSize() error {
274274
workerCnt := b.expectedWorkerSize()
275275
// Increase the worker.
276276
for i := len(b.workers); i < workerCnt; i++ {
277-
sessCtx, err := newSessCtx(reorgInfo.d.store, reorgInfo.ReorgMeta)
278-
if err != nil {
279-
return err
280-
}
281277
var (
282278
runner *backfillWorker
283279
worker backfiller
284280
)
285281
switch b.tp {
286282
case typeAddIndexWorker:
287-
backfillCtx := newBackfillCtx(reorgInfo.d, i, sessCtx, job.SchemaName, b.tbl, jc, "add_idx_rate", false)
283+
backfillCtx, err := newBackfillCtx(i, reorgInfo, job.SchemaName, b.tbl, jc, "add_idx_rate", false)
284+
if err != nil {
285+
return err
286+
}
287+
288288
idxWorker, err := newAddIndexTxnWorker(b.decodeColMap, b.tbl, backfillCtx,
289289
job.ID, reorgInfo.elements, reorgInfo.currElement.TypeKey)
290290
if err != nil {
@@ -293,23 +293,29 @@ func (b *txnBackfillScheduler) adjustWorkerSize() error {
293293
runner = newBackfillWorker(b.ctx, idxWorker)
294294
worker = idxWorker
295295
case typeAddIndexMergeTmpWorker:
296-
backfillCtx := newBackfillCtx(reorgInfo.d, i, sessCtx, job.SchemaName, b.tbl, jc, "merge_tmp_idx_rate", false)
296+
backfillCtx, err := newBackfillCtx(i, reorgInfo, job.SchemaName, b.tbl, jc, "merge_tmp_idx_rate", false)
297+
if err != nil {
298+
return err
299+
}
297300
tmpIdxWorker := newMergeTempIndexWorker(backfillCtx, b.tbl, reorgInfo.elements)
298301
runner = newBackfillWorker(b.ctx, tmpIdxWorker)
299302
worker = tmpIdxWorker
300303
case typeUpdateColumnWorker:
301-
sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(
302-
sessCtx.GetSessionVars().StmtCtx.TypeFlags().
303-
WithIgnoreZeroDateErr(!reorgInfo.ReorgMeta.SQLMode.HasStrictMode()))
304-
updateWorker := newUpdateColumnWorker(sessCtx, i, b.tbl, b.decodeColMap, reorgInfo, jc)
304+
updateWorker, err := newUpdateColumnWorker(i, b.tbl, b.decodeColMap, reorgInfo, jc)
305+
if err != nil {
306+
return err
307+
}
305308
runner = newBackfillWorker(b.ctx, updateWorker)
306309
worker = updateWorker
307310
case typeCleanUpIndexWorker:
308-
idxWorker := newCleanUpIndexWorker(sessCtx, i, b.tbl, b.decodeColMap, reorgInfo, jc)
311+
idxWorker, err := newCleanUpIndexWorker(i, b.tbl, b.decodeColMap, reorgInfo, jc)
312+
if err != nil {
313+
return err
314+
}
309315
runner = newBackfillWorker(b.ctx, idxWorker)
310316
worker = idxWorker
311317
case typeReorgPartitionWorker:
312-
partWorker, err := newReorgPartitionWorker(sessCtx, i, b.tbl, b.decodeColMap, reorgInfo, jc)
318+
partWorker, err := newReorgPartitionWorker(i, b.tbl, b.decodeColMap, reorgInfo, jc)
313319
if err != nil {
314320
return err
315321
}

pkg/ddl/column.go

Lines changed: 31 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1212,11 +1212,23 @@ type updateColumnWorker struct {
12121212
checksumNeeded bool
12131213
}
12141214

1215-
func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *updateColumnWorker {
1215+
func newUpdateColumnWorker(id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) (*updateColumnWorker, error) {
1216+
bCtx, err := newBackfillCtx(id, reorgInfo, reorgInfo.SchemaName, t, jc, "update_col_rate", false)
1217+
if err != nil {
1218+
return nil, err
1219+
}
1220+
1221+
sessCtx := bCtx.sessCtx
1222+
sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(
1223+
sessCtx.GetSessionVars().StmtCtx.TypeFlags().
1224+
WithIgnoreZeroDateErr(!reorgInfo.ReorgMeta.SQLMode.HasStrictMode()))
1225+
bCtx.exprCtx = bCtx.sessCtx.GetExprCtx()
1226+
bCtx.tblCtx = bCtx.sessCtx.GetTableCtx()
1227+
12161228
if !bytes.Equal(reorgInfo.currElement.TypeKey, meta.ColumnElementKey) {
12171229
logutil.DDLLogger().Error("Element type for updateColumnWorker incorrect", zap.String("jobQuery", reorgInfo.Query),
12181230
zap.Stringer("reorgInfo", reorgInfo))
1219-
return nil
1231+
return nil, nil
12201232
}
12211233
var oldCol, newCol *model.ColumnInfo
12221234
for _, col := range t.WritableCols() {
@@ -1248,13 +1260,13 @@ func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalT
12481260
}
12491261
}
12501262
return &updateColumnWorker{
1251-
backfillCtx: newBackfillCtx(reorgInfo.d, id, sessCtx, reorgInfo.SchemaName, t, jc, "update_col_rate", false),
1263+
backfillCtx: bCtx,
12521264
oldColInfo: oldCol,
12531265
newColInfo: newCol,
12541266
rowDecoder: rowDecoder,
12551267
rowMap: make(map[int64]types.Datum, len(decodeColMap)),
12561268
checksumNeeded: checksumNeeded,
1257-
}
1269+
}, nil
12581270
}
12591271

12601272
func (w *updateColumnWorker) AddMetricInfo(cnt float64) {
@@ -1294,7 +1306,7 @@ func (w *updateColumnWorker) fetchRowColVals(txn kv.Transaction, taskRange reorg
12941306
taskDone := false
12951307
var lastAccessedHandle kv.Key
12961308
oprStartTime := startTime
1297-
err := iterateSnapshotKeys(w.jobContext, w.sessCtx.GetStore(), taskRange.priority, taskRange.physicalTable.RecordPrefix(),
1309+
err := iterateSnapshotKeys(w.jobContext, w.ddlCtx.store, taskRange.priority, taskRange.physicalTable.RecordPrefix(),
12981310
txn.StartTS(), taskRange.startKey, taskRange.endKey, func(handle kv.Handle, recordKey kv.Key, rawRow []byte) (bool, error) {
12991311
oprEndTime := time.Now()
13001312
logSlowOperations(oprEndTime.Sub(oprStartTime), "iterateSnapshotKeys in updateColumnWorker fetchRowColVals", 0)
@@ -1329,8 +1341,8 @@ func (w *updateColumnWorker) fetchRowColVals(txn kv.Transaction, taskRange reorg
13291341
}
13301342

13311343
func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, rawRow []byte) error {
1332-
sysTZ := w.sessCtx.GetSessionVars().StmtCtx.TimeZone()
1333-
_, err := w.rowDecoder.DecodeTheExistedColumnMap(w.sessCtx, handle, rawRow, sysTZ, w.rowMap)
1344+
sysTZ := w.loc
1345+
_, err := w.rowDecoder.DecodeTheExistedColumnMap(w.exprCtx, handle, rawRow, sysTZ, w.rowMap)
13341346
if err != nil {
13351347
return errors.Trace(dbterror.ErrCantDecodeRecord.GenWithStackByArgs("column", err))
13361348
}
@@ -1343,26 +1355,26 @@ func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, ra
13431355

13441356
var recordWarning *terror.Error
13451357
// Since every updateColumnWorker handle their own work individually, we can cache warning in statement context when casting datum.
1346-
oldWarn := w.sessCtx.GetSessionVars().StmtCtx.GetWarnings()
1358+
oldWarn := w.warnings.GetWarnings()
13471359
if oldWarn == nil {
13481360
oldWarn = []contextutil.SQLWarn{}
13491361
} else {
13501362
oldWarn = oldWarn[:0]
13511363
}
1352-
w.sessCtx.GetSessionVars().StmtCtx.SetWarnings(oldWarn)
1364+
w.warnings.SetWarnings(oldWarn)
13531365
val := w.rowMap[w.oldColInfo.ID]
13541366
col := w.newColInfo
13551367
if val.Kind() == types.KindNull && col.FieldType.GetType() == mysql.TypeTimestamp && mysql.HasNotNullFlag(col.GetFlag()) {
1356-
if v, err := expression.GetTimeCurrentTimestamp(w.sessCtx.GetExprCtx().GetEvalCtx(), col.GetType(), col.GetDecimal()); err == nil {
1368+
if v, err := expression.GetTimeCurrentTimestamp(w.exprCtx.GetEvalCtx(), col.GetType(), col.GetDecimal()); err == nil {
13571369
// convert null value to timestamp should be substituted with current timestamp if NOT_NULL flag is set.
13581370
w.rowMap[w.oldColInfo.ID] = v
13591371
}
13601372
}
1361-
newColVal, err := table.CastValue(w.sessCtx, w.rowMap[w.oldColInfo.ID], w.newColInfo, false, false)
1373+
newColVal, err := table.CastColumnValue(w.exprCtx, w.rowMap[w.oldColInfo.ID], w.newColInfo, false, false)
13621374
if err != nil {
13631375
return w.reformatErrors(err)
13641376
}
1365-
warn := w.sessCtx.GetSessionVars().StmtCtx.GetWarnings()
1377+
warn := w.warnings.GetWarnings()
13661378
if len(warn) != 0 {
13671379
//nolint:forcetypeassert
13681380
recordWarning = errors.Cause(w.reformatErrors(warn[0].Err)).(*terror.Error)
@@ -1378,7 +1390,7 @@ func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, ra
13781390
})
13791391

13801392
w.rowMap[w.newColInfo.ID] = newColVal
1381-
_, err = w.rowDecoder.EvalRemainedExprColumnMap(w.sessCtx, w.rowMap)
1393+
_, err = w.rowDecoder.EvalRemainedExprColumnMap(w.exprCtx, w.rowMap)
13821394
if err != nil {
13831395
return errors.Trace(err)
13841396
}
@@ -1389,9 +1401,10 @@ func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, ra
13891401
newRow = append(newRow, val)
13901402
}
13911403
checksums := w.calcChecksums()
1392-
sctx, rd := w.sessCtx.GetSessionVars().StmtCtx, &w.sessCtx.GetSessionVars().RowEncoder
1393-
newRowVal, err := tablecodec.EncodeRow(sctx.TimeZone(), newRow, newColumnIDs, nil, nil, rd, checksums...)
1394-
err = sctx.HandleError(err)
1404+
rd := &w.tblCtx.GetSessionVars().RowEncoder
1405+
ec := w.exprCtx.GetEvalCtx().ErrCtx()
1406+
newRowVal, err := tablecodec.EncodeRow(w.loc, newRow, newColumnIDs, nil, nil, rd, checksums...)
1407+
err = ec.HandleError(err)
13951408
if err != nil {
13961409
return errors.Trace(err)
13971410
}
@@ -1423,7 +1436,7 @@ func (w *updateColumnWorker) calcChecksums() []uint32 {
14231436
if !sort.IsSorted(w.checksumBuffer) {
14241437
sort.Sort(w.checksumBuffer)
14251438
}
1426-
checksum, err := w.checksumBuffer.Checksum(w.sessCtx.GetSessionVars().StmtCtx.TimeZone())
1439+
checksum, err := w.checksumBuffer.Checksum(w.loc)
14271440
if err != nil {
14281441
logutil.DDLLogger().Warn("skip checksum in update-column backfill due to encode error", zap.Error(err))
14291442
return nil
@@ -1465,7 +1478,7 @@ func (w *updateColumnWorker) cleanRowMap() {
14651478
func (w *updateColumnWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) {
14661479
oprStartTime := time.Now()
14671480
ctx := kv.WithInternalSourceAndTaskType(context.Background(), w.jobContext.ddlJobSourceType(), kvutil.ExplicitTypeDDL)
1468-
errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(_ context.Context, txn kv.Transaction) error {
1481+
errInTxn = kv.RunInNewTxn(ctx, w.ddlCtx.store, true, func(_ context.Context, txn kv.Transaction) error {
14691482
taskCtx.addedCount = 0
14701483
taskCtx.scanCount = 0
14711484
updateTxnEntrySizeLimitIfNeeded(txn)

pkg/ddl/index.go

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1438,7 +1438,7 @@ func (w *baseIndexWorker) getIndexRecord(idxInfo *model.IndexInfo, handle kv.Han
14381438
idxVal[j] = idxColumnVal
14391439
continue
14401440
}
1441-
idxColumnVal, err = tables.GetColDefaultValue(w.sessCtx, col, w.defaultVals)
1441+
idxColumnVal, err = tables.GetColDefaultValue(w.exprCtx, col, w.defaultVals)
14421442
if err != nil {
14431443
return nil, errors.Trace(err)
14441444
}
@@ -1469,8 +1469,8 @@ func (w *baseIndexWorker) getNextKey(taskRange reorgBackfillTask, taskDone bool)
14691469
}
14701470

14711471
func (w *baseIndexWorker) updateRowDecoder(handle kv.Handle, rawRecord []byte) error {
1472-
sysZone := w.sessCtx.GetSessionVars().StmtCtx.TimeZone()
1473-
_, err := w.rowDecoder.DecodeAndEvalRowWithMap(w.sessCtx, handle, rawRecord, sysZone, w.rowMap)
1472+
sysZone := w.loc
1473+
_, err := w.rowDecoder.DecodeAndEvalRowWithMap(w.exprCtx, handle, rawRecord, sysZone, w.rowMap)
14741474
return errors.Trace(err)
14751475
}
14761476

@@ -1488,7 +1488,7 @@ func (w *baseIndexWorker) fetchRowColVals(txn kv.Transaction, taskRange reorgBac
14881488
// taskDone means that the reorged handle is out of taskRange.endHandle.
14891489
taskDone := false
14901490
oprStartTime := startTime
1491-
err := iterateSnapshotKeys(w.jobContext, w.sessCtx.GetStore(), taskRange.priority, taskRange.physicalTable.RecordPrefix(), txn.StartTS(),
1491+
err := iterateSnapshotKeys(w.jobContext, w.ddlCtx.store, taskRange.priority, taskRange.physicalTable.RecordPrefix(), txn.StartTS(),
14921492
taskRange.startKey, taskRange.endKey, func(handle kv.Handle, recordKey kv.Key, rawRow []byte) (bool, error) {
14931493
oprEndTime := time.Now()
14941494
logSlowOperations(oprEndTime.Sub(oprStartTime), "iterateSnapshotKeys in baseIndexWorker fetchRowColVals", 0)
@@ -1572,7 +1572,8 @@ func genKeyExistsErr(key, value []byte, idxInfo *model.IndexInfo, tblInfo *model
15721572
// Note that `idxRecords` may belong to multiple indexes.
15731573
func (w *addIndexTxnWorker) batchCheckUniqueKey(txn kv.Transaction, idxRecords []*indexRecord) error {
15741574
w.initBatchCheckBufs(len(idxRecords))
1575-
stmtCtx := w.sessCtx.GetSessionVars().StmtCtx
1575+
evalCtx := w.exprCtx.GetEvalCtx()
1576+
ec := evalCtx.ErrCtx()
15761577
uniqueBatchKeys := make([]kv.Key, 0, len(idxRecords))
15771578
cnt := 0
15781579
for i, record := range idxRecords {
@@ -1588,7 +1589,7 @@ func (w *addIndexTxnWorker) batchCheckUniqueKey(txn kv.Transaction, idxRecords [
15881589
}
15891590
// skip by default.
15901591
idxRecords[i].skip = true
1591-
iter := idx.GenIndexKVIter(stmtCtx.ErrCtx(), stmtCtx.TimeZone(), record.vals, record.handle, idxRecords[i].rsData)
1592+
iter := idx.GenIndexKVIter(ec, w.loc, record.vals, record.handle, idxRecords[i].rsData)
15921593
for iter.Valid() {
15931594
var buf []byte
15941595
if cnt < len(w.idxKeyBufs) {
@@ -1644,7 +1645,7 @@ func (w *addIndexTxnWorker) batchCheckUniqueKey(txn kv.Transaction, idxRecords [
16441645
idxRecords[w.recordIdx[i]].skip = found && idxRecords[w.recordIdx[i]].skip
16451646
}
16461647
// Constrains is already checked.
1647-
stmtCtx.BatchCheck = true
1648+
w.tblCtx.GetSessionVars().StmtCtx.BatchCheck = true
16481649
return nil
16491650
}
16501651

@@ -1868,7 +1869,7 @@ func (w *addIndexTxnWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx
18681869
oprStartTime := time.Now()
18691870
jobID := handleRange.getJobID()
18701871
ctx := kv.WithInternalSourceAndTaskType(context.Background(), w.jobContext.ddlJobSourceType(), kvutil.ExplicitTypeDDL)
1871-
errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(_ context.Context, txn kv.Transaction) (err error) {
1872+
errInTxn = kv.RunInNewTxn(ctx, w.ddlCtx.store, true, func(_ context.Context, txn kv.Transaction) (err error) {
18721873
taskCtx.finishTS = txn.StartTS()
18731874
taskCtx.addedCount = 0
18741875
taskCtx.scanCount = 0
@@ -1907,7 +1908,7 @@ func (w *addIndexTxnWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx
19071908
}
19081909

19091910
handle, err := w.indexes[i%len(w.indexes)].Create(
1910-
w.sessCtx.GetTableCtx(), txn, idxRecord.vals, idxRecord.handle, idxRecord.rsData, table.WithIgnoreAssertion, table.FromBackfill)
1911+
w.tblCtx, txn, idxRecord.vals, idxRecord.handle, idxRecord.rsData, table.WithIgnoreAssertion, table.FromBackfill)
19111912
if err != nil {
19121913
if kv.ErrKeyExists.Equal(err) && idxRecord.handle.Equal(handle) {
19131914
// Index already exists, skip it.
@@ -2417,7 +2418,12 @@ type cleanUpIndexWorker struct {
24172418
baseIndexWorker
24182419
}
24192420

2420-
func newCleanUpIndexWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *cleanUpIndexWorker {
2421+
func newCleanUpIndexWorker(id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) (*cleanUpIndexWorker, error) {
2422+
bCtx, err := newBackfillCtx(id, reorgInfo, reorgInfo.SchemaName, t, jc, "cleanup_idx_rate", false)
2423+
if err != nil {
2424+
return nil, err
2425+
}
2426+
24212427
indexes := make([]table.Index, 0, len(t.Indices()))
24222428
rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap)
24232429
for _, index := range t.Indices() {
@@ -2427,13 +2433,13 @@ func newCleanUpIndexWorker(sessCtx sessionctx.Context, id int, t table.PhysicalT
24272433
}
24282434
return &cleanUpIndexWorker{
24292435
baseIndexWorker: baseIndexWorker{
2430-
backfillCtx: newBackfillCtx(reorgInfo.d, id, sessCtx, reorgInfo.SchemaName, t, jc, "cleanup_idx_rate", false),
2436+
backfillCtx: bCtx,
24312437
indexes: indexes,
24322438
rowDecoder: rowDecoder,
24332439
defaultVals: make([]types.Datum, len(t.WritableCols())),
24342440
rowMap: make(map[int64]types.Datum, len(decodeColMap)),
24352441
},
2436-
}
2442+
}, nil
24372443
}
24382444

24392445
func (w *cleanUpIndexWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) {
@@ -2446,7 +2452,7 @@ func (w *cleanUpIndexWorker) BackfillData(handleRange reorgBackfillTask) (taskCt
24462452

24472453
oprStartTime := time.Now()
24482454
ctx := kv.WithInternalSourceAndTaskType(context.Background(), w.jobContext.ddlJobSourceType(), kvutil.ExplicitTypeDDL)
2449-
errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(_ context.Context, txn kv.Transaction) error {
2455+
errInTxn = kv.RunInNewTxn(ctx, w.ddlCtx.store, true, func(_ context.Context, txn kv.Transaction) error {
24502456
taskCtx.addedCount = 0
24512457
taskCtx.scanCount = 0
24522458
updateTxnEntrySizeLimitIfNeeded(txn)
@@ -2471,7 +2477,7 @@ func (w *cleanUpIndexWorker) BackfillData(handleRange reorgBackfillTask) (taskCt
24712477
// we fetch records row by row, so records will belong to
24722478
// index[0], index[1] ... index[n-1], index[0], index[1] ...
24732479
// respectively. So indexes[i%n] is the index of idxRecords[i].
2474-
err := w.indexes[i%n].Delete(w.sessCtx.GetTableCtx(), txn, idxRecord.vals, idxRecord.handle)
2480+
err := w.indexes[i%n].Delete(w.tblCtx, txn, idxRecord.vals, idxRecord.handle)
24752481
if err != nil {
24762482
return errors.Trace(err)
24772483
}

0 commit comments

Comments (0)