Commit e16613d

planner: optimize the performance of PointPlan for Instance Plan Cache (#57260)
ref #54057
1 parent fe6c9b7 commit e16613d
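
This commit speeds up instance-plan-cache hits for PointGet plans: an eligible hit is now cloned through a dedicated fast path (FastClonePointGetForPlanCache) into a per-statement reusable destination instead of always going through the generic CloneForPlanCache. A minimal sketch of how the path is exercised, condensed from the testkit-based tests added below (tk is assumed to be a testkit.TestKit on a store with the feature enabled; the statements mirror the tests and are illustrative only):

    tk.MustExec(`set global tidb_enable_instance_plan_cache=1`)
    tk.MustExec(`create table t (a int, b int, primary key(a, b))`)
    tk.MustExec(`insert into t values (1, 1)`)
    tk.MustExec(`prepare st from 'select * from t where a=? and b=?'`)
    tk.MustExec(`set @a = 1, @b = 1`)
    // Repeated executions hit the instance plan cache; each hit is cloned before reuse,
    // and eligible point-get plans now take the fast-clone path.
    tk.MustQuery(`execute st using @a, @b`).Check(testkit.Rows("1 1"))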

File tree

7 files changed: +248, -18 lines


pkg/planner/core/casetest/instanceplancache/BUILD.bazel

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ go_test(
         "others_test.go",
     ],
     flaky = True,
-    shard_count = 32,
+    shard_count = 34,
     deps = [
         "//pkg/parser/auth",
         "//pkg/testkit",

pkg/planner/core/casetest/instanceplancache/concurrency_test.go

Lines changed: 69 additions & 0 deletions
@@ -335,6 +335,75 @@ func TestInstancePlanCacheTableIndexScan(t *testing.T) {
     wg.Wait()
 }
 
+func TestInstancePlanCacheConcurrencyPointPartitioning(t *testing.T) {
+    store := testkit.CreateMockStore(t)
+    tk := testkit.NewTestKit(t, store)
+    tk.MustExec(`use test`)
+    tk.MustExec(`create table t1 (a int, primary key(a)) partition by hash(a) partitions 10`)
+    tk.MustExec(`create table t2 (a int, primary key(a)) partition by range(a) (
+        partition p0 values less than (10),
+        partition p1 values less than (20),
+        partition p2 values less than (30),
+        partition p3 values less than (40),
+        partition p4 values less than (50),
+        partition p5 values less than (60),
+        partition p6 values less than (70),
+        partition p7 values less than (80),
+        partition p8 values less than (90),
+        partition p9 values less than (100))`)
+    tk.MustExec(`set global tidb_enable_instance_plan_cache=1`)
+    for i := 0; i < 100; i++ {
+        tk.MustExec(fmt.Sprintf("insert into t1 values (%v)", i))
+        tk.MustExec(fmt.Sprintf("insert into t2 values (%v)", i))
+    }
+
+    var wg sync.WaitGroup
+    for i := 0; i < 10; i++ {
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            tki := testkit.NewTestKit(t, store)
+            tki.MustExec(`use test`)
+            for k := 0; k < 100; k++ {
+                tName := fmt.Sprintf("t%v", rand.Intn(2)+1)
+                tki.MustExec(fmt.Sprintf("prepare st from 'select * from %v where a=?'", tName))
+                a := rand.Intn(100)
+                tki.MustExec("set @a = ?", a)
+                tki.MustQuery("execute st using @a").Check(testkit.Rows(fmt.Sprintf("%v", a)))
+            }
+        }()
+    }
+    wg.Wait()
+}
+
+func TestInstancePlanCacheConcurrencyPointMultipleColPKNoTxn(t *testing.T) {
+    store := testkit.CreateMockStore(t)
+    tk := testkit.NewTestKit(t, store)
+    tk.MustExec(`use test`)
+    tk.MustExec(`create table t (a int, b int, primary key(a, b))`)
+    tk.MustExec(`set global tidb_enable_instance_plan_cache=1`)
+    for i := 0; i < 100; i++ {
+        tk.MustExec(fmt.Sprintf("insert into t values (%v, %v)", i, i))
+    }
+
+    var wg sync.WaitGroup
+    for i := 0; i < 10; i++ {
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            tki := testkit.NewTestKit(t, store)
+            tki.MustExec(`use test`)
+            tki.MustExec(`prepare st from 'select * from t where a=? and b=?'`)
+            for k := 0; k < 100; k++ {
+                a := rand.Intn(100)
+                tki.MustExec("set @a = ?, @b = ?", a, a)
+                tki.MustQuery("execute st using @a, @b").Check(testkit.Rows(fmt.Sprintf("%v %v", a, a)))
+            }
+        }()
+    }
+    wg.Wait()
+}
+
 func TestInstancePlanCacheConcurrencyPointNoTxn(t *testing.T) {
     store := testkit.CreateMockStore(t)
     tk := testkit.NewTestKit(t, store)

pkg/planner/core/plan_cache.go

Lines changed: 34 additions & 16 deletions
@@ -226,6 +226,9 @@ func GetPlanFromPlanCache(ctx context.Context, sctx sessionctx.Context,
     if stmtCtx.UseCache() {
         plan, outputCols, stmtHints, hit := lookupPlanCache(ctx, sctx, cacheKey, paramTypes)
         skipPrivCheck := stmt.PointGet.Executor != nil // this case is specially handled
+        if hit && instancePlanCacheEnabled(ctx) {
+            plan, hit = clonePlanForInstancePlanCache(ctx, sctx, stmt, plan)
+        }
         if hit {
             if plan, ok, err := adjustCachedPlan(ctx, sctx, plan, stmtHints, isNonPrepared, skipPrivCheck, binding, is, stmt); err != nil || ok {
                 return plan, outputCols, err
@@ -236,6 +239,29 @@
     return generateNewPlan(ctx, sctx, isNonPrepared, is, stmt, cacheKey, paramTypes)
 }
 
+func clonePlanForInstancePlanCache(ctx context.Context, sctx sessionctx.Context,
+    stmt *PlanCacheStmt, plan base.Plan) (clonedPlan base.Plan, ok bool) {
+    // TODO: add metrics to record the time cost of this clone operation.
+    fastPoint := stmt.PointGet.Executor != nil // this case is specially handled
+    pointPlan, isPoint := plan.(*PointGetPlan)
+    if fastPoint && isPoint { // special optimization for fast point plans
+        if stmt.PointGet.FastPlan == nil {
+            stmt.PointGet.FastPlan = new(PointGetPlan)
+        }
+        FastClonePointGetForPlanCache(sctx.GetPlanCtx(), pointPlan, stmt.PointGet.FastPlan)
+        clonedPlan = stmt.PointGet.FastPlan
+    } else {
+        clonedPlan, ok = plan.CloneForPlanCache(sctx.GetPlanCtx())
+        if !ok { // clone the value to solve concurrency problem
+            return nil, false
+        }
+    }
+    if intest.InTest && ctx.Value(PlanCacheKeyTestClone{}) != nil {
+        ctx.Value(PlanCacheKeyTestClone{}).(func(plan, cloned base.Plan))(plan, clonedPlan)
+    }
+    return clonedPlan, true
+}
+
 func instancePlanCacheEnabled(ctx context.Context) bool {
     if intest.InTest && ctx.Value(PlanCacheKeyEnableInstancePlanCache{}) != nil {
         return true
@@ -252,25 +278,17 @@ func lookupPlanCache(ctx context.Context, sctx sessionctx.Context, cacheKey stri
             core_metrics.GetPlanCacheLookupDuration(useInstanceCache).Observe(time.Since(begin).Seconds())
         }
     }(time.Now())
+    var v any
     if useInstanceCache {
-        if v, hit := domain.GetDomain(sctx).GetInstancePlanCache().Get(cacheKey, paramTypes); hit {
-            pcv := v.(*PlanCacheValue)
-            clonedPlan, ok := pcv.Plan.CloneForPlanCache(sctx.GetPlanCtx())
-            if !ok { // clone the value to solve concurrency problem
-                return nil, nil, nil, false
-            }
-            if intest.InTest && ctx.Value(PlanCacheKeyTestClone{}) != nil {
-                ctx.Value(PlanCacheKeyTestClone{}).(func(plan, cloned base.Plan))(pcv.Plan, clonedPlan)
-            }
-            return clonedPlan, pcv.OutputColumns, pcv.stmtHints, true
-        }
+        v, hit = domain.GetDomain(sctx).GetInstancePlanCache().Get(cacheKey, paramTypes)
     } else {
-        if v, hit := sctx.GetSessionPlanCache().Get(cacheKey, paramTypes); hit {
-            pcv := v.(*PlanCacheValue)
-            return pcv.Plan, pcv.OutputColumns, pcv.stmtHints, true
-        }
+        v, hit = sctx.GetSessionPlanCache().Get(cacheKey, paramTypes)
+    }
+    if !hit {
+        return nil, nil, nil, false
     }
-    return nil, nil, nil, false
+    pcv := v.(*PlanCacheValue)
+    return pcv.Plan, pcv.OutputColumns, pcv.stmtHints, true
 }
 
 func adjustCachedPlan(ctx context.Context, sctx sessionctx.Context,
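
The net effect of the plan_cache.go change: lookupPlanCache no longer clones on a hit, and the clone decision moves into clonePlanForInstancePlanCache, which reuses a per-statement PointGetPlan for the fast point-get case. A simplified paraphrase of that decision (an illustrative fragment, not standalone code; error handling and the in-test hook are omitted, see the full diff above):

    // `plan` was just returned by the instance plan cache and must not be mutated in place.
    if pointPlan, isPoint := plan.(*PointGetPlan); isPoint && stmt.PointGet.Executor != nil {
        if stmt.PointGet.FastPlan == nil {
            stmt.PointGet.FastPlan = new(PointGetPlan) // allocated once per prepared statement
        }
        // Field-by-field copy into the reusable destination, with far fewer allocations.
        plan = FastClonePointGetForPlanCache(sctx.GetPlanCtx(), pointPlan, stmt.PointGet.FastPlan)
    } else if plan, ok = plan.CloneForPlanCache(sctx.GetPlanCtx()); !ok {
        return nil, false // generic clone failed; the caller falls back to building a new plan
    }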

pkg/planner/core/plan_cache_rebuild_test.go

Lines changed: 87 additions & 0 deletions
@@ -18,15 +18,19 @@ import (
     "context"
     "fmt"
     "math/rand"
+    "os"
     "reflect"
     "strings"
     "testing"
     "unsafe"
 
     "github.com/pingcap/errors"
     "github.com/pingcap/tidb/pkg/expression"
+    "github.com/pingcap/tidb/pkg/parser"
+    "github.com/pingcap/tidb/pkg/planner"
     "github.com/pingcap/tidb/pkg/planner/core"
     "github.com/pingcap/tidb/pkg/planner/core/base"
+    "github.com/pingcap/tidb/pkg/planner/core/resolve"
    "github.com/pingcap/tidb/pkg/planner/util"
     "github.com/pingcap/tidb/pkg/testkit"
     "github.com/stretchr/testify/require"
@@ -441,3 +445,86 @@ type visit struct {
     a2  unsafe.Pointer
     typ reflect.Type
 }
+
+func TestFastPointGetClone(t *testing.T) {
+    codeFile := "plan_clone_utils.go"
+    codeData, err := os.ReadFile(codeFile)
+    require.NoError(t, err)
+    codeLines := strings.Split(string(codeData), "\n")
+    beginPrefix := `func FastClonePointGetForPlanCache(`
+    endPrefix := `}`
+    beginIdx, endIdx := -1, -1
+    for i, line := range codeLines {
+        if strings.HasPrefix(line, beginPrefix) {
+            beginIdx = i
+        }
+        if beginIdx != -1 && strings.HasPrefix(line, endPrefix) {
+            endIdx = i
+            break
+        }
+    }
+    cloneFuncCode := strings.Join(codeLines[beginIdx:endIdx+1], "\n")
+    fieldNoNeedToClone := map[string]struct{}{
+        "cost":         {},
+        "planCostInit": {},
+        "planCost":     {},
+        "planCostVer2": {},
+        "accessCols":   {},
+    }
+
+    pointPlan := reflect.TypeOf(core.PointGetPlan{})
+    for i := 0; i < pointPlan.NumField(); i++ {
+        fieldName := pointPlan.Field(i).Name
+        if _, ok := fieldNoNeedToClone[fieldName]; ok {
+            continue
+        }
+        assignFieldCode := fmt.Sprintf("%v =", fieldName)
+        if !strings.Contains(cloneFuncCode, assignFieldCode) {
+            errMsg := fmt.Sprintf("field %v might not be set in FastClonePointGetForPlanCache correctly", fieldName)
+            t.Fatal(errMsg)
+        }
+    }
+}
+
+func BenchmarkPointGetCloneFast(b *testing.B) {
+    store, domain := testkit.CreateMockStoreAndDomain(b)
+    tk := testkit.NewTestKit(b, store)
+    tk.MustExec(`use test`)
+    tk.MustExec(`create table t (a int, b int, primary key(a, b))`)
+
+    p := parser.New()
+    stmt, err := p.ParseOneStmt("select a, b from t where a=1 and b=1", "", "")
+    require.NoError(b, err)
+    nodeW := resolve.NewNodeW(stmt)
+    plan, _, err := planner.Optimize(context.TODO(), tk.Session(), nodeW, domain.InfoSchema())
+    require.NoError(b, err)
+
+    b.ResetTimer()
+    src := plan.(*core.PointGetPlan)
+    dst := new(core.PointGetPlan)
+    sctx := tk.Session().GetPlanCtx()
+    for i := 0; i < b.N; i++ {
+        core.FastClonePointGetForPlanCache(sctx, src, dst)
+    }
+}
+
+func BenchmarkPointGetClone(b *testing.B) {
+    store, domain := testkit.CreateMockStoreAndDomain(b)
+    tk := testkit.NewTestKit(b, store)
+    tk.MustExec(`use test`)
+    tk.MustExec(`create table t (a int, b int, primary key(a, b))`)
+
+    p := parser.New()
+    stmt, err := p.ParseOneStmt("select a, b from t where a=1 and b=1", "", "")
+    require.NoError(b, err)
+    nodeW := resolve.NewNodeW(stmt)
+    plan, _, err := planner.Optimize(context.TODO(), tk.Session(), nodeW, domain.InfoSchema())
+    require.NoError(b, err)
+
+    b.ResetTimer()
+    src := plan.(*core.PointGetPlan)
+    sctx := tk.Session().GetPlanCtx()
+    for i := 0; i < b.N; i++ {
+        src.CloneForPlanCache(sctx)
+    }
+}

pkg/planner/core/plan_cache_utils.go

Lines changed: 5 additions & 0 deletions
@@ -523,6 +523,11 @@ type PointGetExecutorCache struct {
     // Notice that we should only cache the PointGetExecutor that have a snapshot with MaxTS in it.
     // If the current plan is not PointGet or does not use MaxTS optimization, this value should be nil here.
     Executor any
+
+    // FastPlan is only used for instance plan cache.
+    // To ensure thread-safe, we have to clone each plan before reusing if using instance plan cache.
+    // To reduce the memory allocation and increase performance, we cache the FastPlan here.
+    FastPlan *PointGetPlan
 }
 
 // PlanCacheStmt store prepared ast from PrepareExec and other related fields

pkg/planner/core/plan_clone_utils.go

Lines changed: 49 additions & 0 deletions
@@ -17,6 +17,7 @@ package core
 import (
     "github.com/pingcap/tidb/pkg/expression"
     "github.com/pingcap/tidb/pkg/planner/core/base"
+    "github.com/pingcap/tidb/pkg/types"
 )
 
 func clonePhysicalPlansForPlanCache(newCtx base.PlanContext, plans []base.PhysicalPlan) ([]base.PhysicalPlan, bool) {
@@ -172,3 +173,51 @@ func cloneConstant2DForPlanCache(constants [][]*expression.Constant) [][]*expres
     }
     return cloned
 }
+
+// FastClonePointGetForPlanCache is a fast path to clone a PointGetPlan for plan cache.
+func FastClonePointGetForPlanCache(newCtx base.PlanContext, src, dst *PointGetPlan) *PointGetPlan {
+    if dst == nil {
+        dst = new(PointGetPlan)
+    }
+    dst.Plan = src.Plan
+    dst.Plan.SetSCtx(newCtx)
+    dst.probeParents = src.probeParents
+    dst.PartitionNames = src.PartitionNames
+    dst.dbName = src.dbName
+    dst.schema = src.schema
+    dst.TblInfo = src.TblInfo
+    dst.IndexInfo = src.IndexInfo
+    dst.PartitionIdx = nil // partition prune will be triggered during execution phase
+    dst.Handle = nil       // handle will be set during rebuild phase
+    if src.HandleConstant == nil {
+        dst.HandleConstant = nil
+    } else {
+        if src.HandleConstant.SafeToShareAcrossSession() {
+            dst.HandleConstant = src.HandleConstant
+        } else {
+            dst.HandleConstant = src.HandleConstant.Clone().(*expression.Constant)
+        }
+    }
+    dst.handleFieldType = src.handleFieldType
+    dst.HandleColOffset = src.HandleColOffset
+    if len(dst.IndexValues) < len(src.IndexValues) { // actually set during rebuild phase
+        dst.IndexValues = make([]types.Datum, len(src.IndexValues))
+    } else {
+        dst.IndexValues = dst.IndexValues[:len(src.IndexValues)]
+    }
+    dst.IndexConstants = cloneConstantsForPlanCache(src.IndexConstants, dst.IndexConstants)
+    dst.ColsFieldType = src.ColsFieldType
+    dst.IdxCols = cloneColumnsForPlanCache(src.IdxCols, dst.IdxCols)
+    dst.IdxColLens = src.IdxColLens
+    dst.AccessConditions = cloneExpressionsForPlanCache(src.AccessConditions, dst.AccessConditions)
+    dst.UnsignedHandle = src.UnsignedHandle
+    dst.IsTableDual = src.IsTableDual
+    dst.Lock = src.Lock
+    dst.outputNames = src.outputNames
+    dst.LockWaitTime = src.LockWaitTime
+    dst.Columns = src.Columns
+
+    // remaining fields are unnecessary to clone:
+    // cost, planCostInit, planCost, planCostVer2, accessCols
+    return dst
+}
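
FastClonePointGetForPlanCache copies fields directly into a caller-supplied destination, sharing whatever is safe to share across sessions (schema, table and index metadata, shareable constants) and resetting the per-execution fields (Handle, PartitionIdx, IndexValues) that are filled in again during the rebuild phase. A minimal usage sketch mirroring BenchmarkPointGetCloneFast and the FastPlan reuse in PointGetExecutorCache above (src is assumed to be a *PointGetPlan produced by the planner and ctx the target session's plan context; the loop bound is arbitrary):

    dst := new(PointGetPlan) // allocated once, then reused for every execution
    for i := 0; i < 100; i++ {
        cloned := FastClonePointGetForPlanCache(ctx, src, dst)
        // cloned is dst: rebuild the handle/index values for the current parameters,
        // execute the plan, then reuse the same destination on the next cache hit.
        _ = cloned
    }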

pkg/planner/core/point_get_plan.go

Lines changed: 3 additions & 1 deletion
@@ -105,14 +105,16 @@ type PointGetPlan struct {
     outputNames []*types.FieldName `plan-cache-clone:"shallow"`
     LockWaitTime int64
     Columns []*model.ColumnInfo `plan-cache-clone:"shallow"`
-    cost float64
 
     // required by cost model
+    cost float64
     planCostInit bool
     planCost float64
     planCostVer2 costusage.CostVer2 `plan-cache-clone:"shallow"`
     // accessCols represents actual columns the PointGet will access, which are used to calculate row-size
     accessCols []*expression.Column
+
+    // NOTE: please update FastClonePointGetForPlanCache accordingly if you add new fields here.
 }
 
 // GetEstRowCountForDisplay implements PhysicalPlan interface.
