
Commit d8dd55b

executor: change the evaluation order of columns in Update and Insert statements (#57123) (#59273)
ref #56829
1 parent 161ca53 commit d8dd55b
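
Background (a reading of the diff below, not part of the original commit message): issue #56829 concerns INSERT ... ON DUPLICATE KEY UPDATE on a table whose virtual generated column depends on columns that the update itself modifies (in the regression test, expired_at is generated from updated_at and expires, and updated_at also carries ON UPDATE CURRENT_TIMESTAMP). Previously doDupRowUpdate evaluated all assignments in a single pass, so a generated column could be computed from dependencies that had not been refreshed yet, leaving its index out of sync with the row. This commit splits the assignments: non-generated columns are evaluated first in doDupRowUpdate, while generated columns are deferred to updateRecord so they are recomputed after their dependencies hold their final values. Below is a minimal, self-contained Go sketch of that two-phase ordering; the column/assignment types are hypothetical simplifications, not TiDB's expression.Assignment or table APIs.

package main

import "fmt"

// Hypothetical, simplified stand-ins for TiDB's column/assignment types.
type column struct {
	name      string
	generated bool
}

type assignment struct {
	col  column
	expr func(row map[string]int) int // right-hand side, or the generation expression
}

// applyDupUpdate mirrors the new evaluation order: user-written assignments to
// plain columns run first; generated columns are recomputed only afterwards
// (in TiDB, that second phase happens inside updateRecord).
func applyDupUpdate(row map[string]int, assigns []assignment) {
	var generated, nonGenerated []assignment
	for _, a := range assigns {
		if a.col.generated {
			generated = append(generated, a)
		} else {
			nonGenerated = append(nonGenerated, a)
		}
	}
	for _, a := range nonGenerated { // phase 1: plain columns
		row[a.col.name] = a.expr(row)
	}
	for _, a := range generated { // phase 2: generated columns see final dependencies
		row[a.col.name] = a.expr(row)
	}
}

func main() {
	// Mimics the regression test: expired_at is derived from updated_at + expires.
	row := map[string]int{"expires": 60, "updated_at": 100, "expired_at": 160}
	assigns := []assignment{
		{column{"expires", false}, func(r map[string]int) int { return r["expires"] + 1 }},
		{column{"updated_at", false}, func(r map[string]int) int { return 200 }}, // stands in for ON UPDATE CURRENT_TIMESTAMP
		{column{"expired_at", true}, func(r map[string]int) int { return r["updated_at"] + r["expires"] }},
	}
	applyDupUpdate(row, assigns)
	fmt.Println(row) // map[expired_at:261 expires:61 updated_at:200]
}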

5 files changed: +401 additions, -141 deletions


executor/insert.go

Lines changed: 71 additions & 25 deletions
@@ -30,6 +30,7 @@ import (
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tidb/parser/terror"
+	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/table"
 	"github.com/pingcap/tidb/table/tables"
 	"github.com/pingcap/tidb/tablecodec"
@@ -196,7 +197,15 @@ func (e *InsertValues) prefetchDataCache(ctx context.Context, txn kv.Transaction
 }
 
 // updateDupRow updates a duplicate row to a new row.
-func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Transaction, row toBeCheckedRow, handle kv.Handle, onDuplicate []*expression.Assignment, autoColIdx int) error {
+func (e *InsertExec) updateDupRow(
+	ctx context.Context,
+	idxInBatch int,
+	txn kv.Transaction,
+	row toBeCheckedRow,
+	handle kv.Handle,
+	_ []*expression.Assignment,
+	autoColIdx int,
+) error {
 	oldRow, err := getOldRow(ctx, e.ctx, txn, row.t, handle, e.GenExprs)
 	if err != nil {
 		return err
@@ -394,8 +403,14 @@ func (e *InsertExec) initEvalBuffer4Dup() {
 }
 
 // doDupRowUpdate updates the duplicate row.
-func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRow []types.Datum, newRow []types.Datum,
-	extraCols []types.Datum, cols []*expression.Assignment, idxInBatch int, autoColIdx int) error {
+func (e *InsertExec) doDupRowUpdate(
+	ctx context.Context,
+	handle kv.Handle,
+	oldRow, newRow, extraCols []types.Datum,
+	assigns []*expression.Assignment,
+	idxInBatch int,
+	autoColIdx int,
+) error {
 	assignFlag := make([]bool, len(e.Table.WritableCols()))
 	// See http://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
 	e.curInsertVals.SetDatums(newRow...)
@@ -409,40 +424,71 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 	e.row4Update = append(e.row4Update, extraCols...)
 	e.row4Update = append(e.row4Update, newRow...)
 
-	// Update old row when the key is duplicated.
-	e.evalBuffer4Dup.SetDatums(e.row4Update...)
-	sc := e.ctx.GetSessionVars().StmtCtx
-	warnCnt := int(sc.WarningCount())
-	for _, col := range cols {
-		if col.LazyErr != nil {
-			return col.LazyErr
-		}
-		val, err1 := col.Expr.Eval(e.evalBuffer4Dup.ToRow())
-		if err1 != nil {
-			return err1
-		}
-		c := col.Col.ToInfo()
-		c.Name = col.ColName
-		e.row4Update[col.Col.Index], err1 = table.CastValue(e.ctx, val, c, false, false)
-		if err1 != nil {
-			return err1
+	// Only evaluate non-generated columns here,
+	// other fields will be evaluated in updateRecord.
+	var generated, nonGenerated []*expression.Assignment
+	cols := e.Table.Cols()
+	for _, assign := range assigns {
+		if cols[assign.Col.Index].IsGenerated() {
+			generated = append(generated, assign)
+		} else {
+			nonGenerated = append(nonGenerated, assign)
 		}
+	}
+
+	warnCnt := int(e.ctx.GetSessionVars().StmtCtx.WarningCount())
+	errorHandler := func(sctx sessionctx.Context, assign *expression.Assignment, val *types.Datum, err error) error {
+		c := assign.Col.ToInfo()
+		c.Name = assign.ColName
+		sc := sctx.GetSessionVars().StmtCtx
+
 		if newWarnings := sc.TruncateWarnings(warnCnt); len(newWarnings) > 0 {
 			for k := range newWarnings {
 				// Use `idxInBatch` here for simplicity, since the offset of the batch is unknown under the current context.
-				newWarnings[k].Err = completeInsertErr(c, &val, idxInBatch, newWarnings[k].Err)
+				newWarnings[k].Err = completeInsertErr(c, val, idxInBatch, newWarnings[k].Err)
			}
			sc.AppendWarnings(newWarnings)
			warnCnt += len(newWarnings)
		}
-		e.evalBuffer4Dup.SetDatum(col.Col.Index, e.row4Update[col.Col.Index])
-		assignFlag[col.Col.Index] = true
+		return err
+	}
+
+	// Update old row when the key is duplicated.
+	e.evalBuffer4Dup.SetDatums(e.row4Update...)
+	for _, assign := range nonGenerated {
+		var val types.Datum
+		if assign.LazyErr != nil {
+			return assign.LazyErr
+		}
+		val, err := assign.Expr.Eval(e.evalBuffer4Dup.ToRow())
+		if err != nil {
+			return err
+		}
+
+		c := assign.Col.ToInfo()
+		idx := assign.Col.Index
+		c.Name = assign.ColName
+		val, err = table.CastValue(e.ctx, val, c, false, false)
+		if err != nil {
+			return err
+		}
+
+		_ = errorHandler(e.ctx, assign, &val, nil)
+		e.evalBuffer4Dup.SetDatum(idx, val)
+		e.row4Update[assign.Col.Index] = val
+		assignFlag[assign.Col.Index] = true
 	}
 
 	newData := e.row4Update[:len(oldRow)]
-	_, err := updateRecord(ctx, e.ctx, handle, oldRow, newData, assignFlag, e.Table, true, e.memTracker, e.fkChecks, e.fkCascades)
+	_, err := updateRecord(
+		ctx, e.ctx,
+		handle, oldRow, newData,
+		0, generated, e.evalBuffer4Dup, errorHandler,
+		assignFlag, e.Table,
+		true, e.memTracker, e.fkChecks, e.fkCascades)
+
 	if err != nil {
-		return err
+		return errors.Trace(err)
 	}
 
 	if autoColIdx >= 0 {
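
A note on the errorHandler closure introduced above (an interpretation of the diff, illustrated with a simplified sketch): doDupRowUpdate now builds a callback that takes freshly raised truncation warnings and rewrites them via completeInsertErr with the offending column and the row's offset in the batch. The same closure is called directly in the non-generated pass and handed to updateRecord, so warnings produced while the deferred generated columns are evaluated get the same INSERT-style messages. The sketch below models only the pattern (decorate warnings raised since the last call, pass the error through); the types and names are hypothetical, not TiDB's.

package main

import "fmt"

// warning is a hypothetical stand-in for the statement context's SQL warnings.
type warning struct{ msg string }

// newErrorHandler returns a closure that rewrites warnings raised since the
// previous call, attaching the column name and the row's index in the batch,
// then passes the evaluation error through unchanged.
func newErrorHandler(warnings *[]warning, idxInBatch int) func(col string, err error) error {
	seen := len(*warnings) // warnings that existed beforehand are left untouched
	return func(col string, err error) error {
		for i := seen; i < len(*warnings); i++ {
			(*warnings)[i].msg = fmt.Sprintf("column %q at row %d: %s", col, idxInBatch, (*warnings)[i].msg)
		}
		seen = len(*warnings)
		return err
	}
}

func main() {
	var warnings []warning
	handler := newErrorHandler(&warnings, 3)

	// Evaluating one assignment raises a truncation warning...
	warnings = append(warnings, warning{"data truncated"})
	// ...and the handler rewrites it in place with the insert context.
	_ = handler("expires", nil)

	fmt.Println(warnings[0].msg) // column "expires" at row 3: data truncated
}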

executor/insert_test.go

Lines changed: 18 additions & 0 deletions
@@ -254,6 +254,24 @@ func testInsertOnDuplicateKey(t *testing.T, tk *testkit.TestKit) {
 	tk.MustExec(`insert into t1 set c1 = 0.1`)
 	tk.MustExec(`insert into t1 set c1 = 0.1 on duplicate key update c1 = 1`)
 	tk.MustQuery(`select * from t1 use index(primary)`).Check(testkit.Rows(`1.0000`))
+
+	// Test issue 56829
+	tk.MustExec(`
+		CREATE TABLE cache (
+			cache_key varchar(512) NOT NULL,
+			updated_at datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+			expired_at datetime GENERATED ALWAYS AS (if(expires > 0, date_add(updated_at, interval expires second), date_add(updated_at, interval 99 year))) VIRTUAL,
+			expires int(11),
+			PRIMARY KEY (cache_key) /*T![clustered_index] CLUSTERED */,
+			KEY idx_c_on_expired_at (expired_at)
+		)`)
+	tk.MustExec("INSERT INTO cache(cache_key, expires) VALUES ('2001-01-01 11:11:11', 60) ON DUPLICATE KEY UPDATE expires = expires + 1")
+	tk.MustExec("select sleep(1)")
+	tk.MustExec("INSERT INTO cache(cache_key, expires) VALUES ('2001-01-01 11:11:11', 60) ON DUPLICATE KEY UPDATE expires = expires + 1")
+	tk.MustExec("admin check table cache")
+	rs1 := tk.MustQuery("select cache_key, expired_at from cache use index() order by cache_key")
+	rs2 := tk.MustQuery("select cache_key, expired_at from cache use index(idx_c_on_expired_at) order by cache_key")
+	rs1.Check(rs2.Rows())
 }
 
 func TestClusterIndexInsertOnDuplicateKey(t *testing.T) {
