Commit 5b91759

you06 authored and ti-chi-bot committed
This is an automated cherry-pick of pingcap#45814
Signed-off-by: ti-chi-bot <[email protected]>
1 parent fe7bcf1 commit 5b91759

File tree: 8 files changed, +2510 -1 lines changed


planner/core/plan_cache.go

Lines changed: 856 additions & 0 deletions
Large diffs are not rendered by default.

planner/core/plan_cache_utils.go

Lines changed: 665 additions & 0 deletions
Large diffs are not rendered by default.
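
The two plan-cache diffs above are too large to render, so what follows is only a hedged sketch of the idea the commit title names (storing the session-variable hints alongside the plan): a cached plan keeps the StmtHints that were in effect when it was built, and executing the cached plan restores them into the statement context, which is what the stmtHints field and StmtHints.Clone further down in this diff suggest. All type and function names in the sketch are illustrative stand-ins, not TiDB's plan-cache API.

package main

import "fmt"

// stmtHints is an illustrative stand-in for sessionctx/stmtctx.StmtHints.
type stmtHints struct {
    memQuotaQuery    int64
    maxExecutionTime uint64
}

// cachedPlan pairs a plan with the hints captured when the plan was built.
type cachedPlan struct {
    planDigest string
    hints      stmtHints
}

// execContext is an illustrative stand-in for the executing statement context.
type execContext struct {
    hints stmtHints
}

// execute restores the cached hints into the executing context before running
// the plan, so a cache hit behaves as if the statement had been planned fresh.
func execute(ctx *execContext, c *cachedPlan) {
    ctx.hints = c.hints
    fmt.Printf("executing %s with mem_quota=%d max_execution_time=%d\n",
        c.planDigest, ctx.hints.memQuotaQuery, ctx.hints.maxExecutionTime)
}

func main() {
    // Hints in effect when the plan was built and cached.
    built := stmtHints{memQuotaQuery: 1 << 30, maxExecutionTime: 1000}
    plan := &cachedPlan{planDigest: "PointGet_1", hints: built}

    // A later execution of the same statement reuses the cached plan and
    // therefore the cached hints, not whatever the session holds right now.
    execute(&execContext{}, plan)
}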

planner/core/point_get_plan.go

Lines changed: 27 additions & 0 deletions
@@ -87,6 +87,33 @@ type PointGetPlan struct {
 	planCost float64
 	// accessCols represents actual columns the PointGet will access, which are used to calculate row-size
 	accessCols []*expression.Column
+<<<<<<< HEAD
+=======
+
+	// probeParents records the IndexJoins and Applys with this operator in their inner children.
+	// Please see comments in PhysicalPlan for details.
+	probeParents []PhysicalPlan
+	// stmtHints should restore in executing context.
+	stmtHints *stmtctx.StmtHints
+}
+
+func (p *PointGetPlan) getEstRowCountForDisplay() float64 {
+	if p == nil {
+		return 0
+	}
+	return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents)
+}
+
+func (p *PointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 {
+	if p == nil {
+		return 1
+	}
+	return getActualProbeCntFromProbeParents(p.probeParents, statsColl)
+}
+
+func (p *PointGetPlan) setProbeParents(probeParents []PhysicalPlan) {
+	p.probeParents = probeParents
+>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
 }
 
 type nameValuePair struct {
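
For readers unfamiliar with the probe-parent bookkeeping added above: getEstRowCountForDisplay scales the PointGet's per-probe row estimate (StatsInfo().RowCount) by how many times its IndexJoin/Apply ancestors are expected to probe it. The sketch below only illustrates that scaling idea with made-up types; the exact aggregation inside getEstimatedProbeCntFromProbeParents may differ, and nothing here is TiDB's implementation.

package main

import "fmt"

// probeParent is a stand-in for an IndexJoin or Apply that drives its inner child.
type probeParent struct {
    estOuterRows float64 // estimated rows on the outer (driving) side
}

// estimatedProbeCnt multiplies the outer-row estimates of all probe parents,
// since each nesting level re-executes the inner side once per outer row.
func estimatedProbeCnt(parents []probeParent) float64 {
    cnt := 1.0
    for _, p := range parents {
        cnt *= p.estOuterRows
    }
    return cnt
}

func main() {
    perProbeRows := 1.0 // a PointGet returns at most one row per probe
    parents := []probeParent{{estOuterRows: 10}, {estOuterRows: 3}}
    fmt.Println(perProbeRows * estimatedProbeCnt(parents)) // 30
}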

session/test/vars/BUILD.bazel

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+
+go_test(
+    name = "vars_test",
+    timeout = "short",
+    srcs = [
+        "main_test.go",
+        "vars_test.go",
+    ],
+    flaky = True,
+    shard_count = 13,
+    deps = [
+        "//config",
+        "//domain",
+        "//errno",
+        "//kv",
+        "//parser/mysql",
+        "//parser/terror",
+        "//sessionctx/stmtctx",
+        "//sessionctx/variable",
+        "//testkit",
+        "//testkit/testmain",
+        "//testkit/testsetup",
+        "@com_github_pingcap_failpoint//:failpoint",
+        "@com_github_stretchr_testify//require",
+        "@com_github_tikv_client_go_v2//tikv",
+        "@com_github_tikv_client_go_v2//txnkv/transaction",
+        "@org_uber_go_goleak//:goleak",
+    ],
+)

session/test/vars/vars_test.go

Lines changed: 685 additions & 0 deletions
Large diffs are not rendered by default.

sessionctx/stmtctx/BUILD.bazel

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "stmtctx",
+    srcs = ["stmtctx.go"],
+    importpath = "github.com/pingcap/tidb/sessionctx/stmtctx",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//domain/resourcegroup",
+        "//errno",
+        "//parser",
+        "//parser/ast",
+        "//parser/model",
+        "//parser/mysql",
+        "//parser/terror",
+        "//util/disk",
+        "//util/execdetails",
+        "//util/memory",
+        "//util/resourcegrouptag",
+        "//util/topsql/stmtstats",
+        "//util/tracing",
+        "@com_github_pingcap_errors//:errors",
+        "@com_github_tikv_client_go_v2//tikvrpc",
+        "@com_github_tikv_client_go_v2//util",
+        "@org_golang_x_exp//maps",
+        "@org_golang_x_exp//slices",
+        "@org_uber_go_atomic//:atomic",
+        "@org_uber_go_zap//:zap",
+    ],
+)
+
+go_test(
+    name = "stmtctx_test",
+    timeout = "short",
+    srcs = [
+        "main_test.go",
+        "stmtctx_test.go",
+    ],
+    embed = [":stmtctx"],
+    flaky = True,
+    shard_count = 6,
+    deps = [
+        "//kv",
+        "//sessionctx/variable",
+        "//testkit",
+        "//testkit/testsetup",
+        "//util/execdetails",
+        "@com_github_pingcap_errors//:errors",
+        "@com_github_stretchr_testify//require",
+        "@com_github_tikv_client_go_v2//util",
+        "@org_uber_go_atomic//:atomic",
+        "@org_uber_go_goleak//:goleak",
+    ],
+)

sessionctx/stmtctx/stmtctx.go

Lines changed: 39 additions & 1 deletion
@@ -300,7 +300,6 @@ type StatementContext struct {
 type StmtHints struct {
 	// Hint Information
 	MemQuotaQuery int64
-	ApplyCacheCapacity int64
 	MaxExecutionTime uint64
 	ReplicaRead byte
 	AllowInSubqToJoinAndAgg bool
@@ -329,6 +328,45 @@ func (sh *StmtHints) TaskMapNeedBackUp() bool {
 	return sh.ForceNthPlan != -1
 }
 
+// Clone the StmtHints struct and returns the pointer of the new one.
+func (sh *StmtHints) Clone() *StmtHints {
+	var (
+		vars       map[string]string
+		tableHints []*ast.TableOptimizerHint
+	)
+	if len(sh.SetVars) > 0 {
+		vars = make(map[string]string, len(sh.SetVars))
+		for k, v := range sh.SetVars {
+			vars[k] = v
+		}
+	}
+	if len(sh.OriginalTableHints) > 0 {
+		tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints))
+		copy(tableHints, sh.OriginalTableHints)
+	}
+	return &StmtHints{
+		MemQuotaQuery:                  sh.MemQuotaQuery,
+		MaxExecutionTime:               sh.MaxExecutionTime,
+		TidbKvReadTimeout:              sh.TidbKvReadTimeout,
+		ReplicaRead:                    sh.ReplicaRead,
+		AllowInSubqToJoinAndAgg:        sh.AllowInSubqToJoinAndAgg,
+		NoIndexMergeHint:               sh.NoIndexMergeHint,
+		StraightJoinOrder:              sh.StraightJoinOrder,
+		EnableCascadesPlanner:          sh.EnableCascadesPlanner,
+		ForceNthPlan:                   sh.ForceNthPlan,
+		ResourceGroup:                  sh.ResourceGroup,
+		HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint,
+		HasMemQuotaHint:                sh.HasMemQuotaHint,
+		HasReplicaReadHint:             sh.HasReplicaReadHint,
+		HasMaxExecutionTime:            sh.HasMaxExecutionTime,
+		HasTidbKvReadTimeout:           sh.HasTidbKvReadTimeout,
+		HasEnableCascadesPlannerHint:   sh.HasEnableCascadesPlannerHint,
+		HasResourceGroup:               sh.HasResourceGroup,
+		SetVars:                        vars,
+		OriginalTableHints:             tableHints,
+	}
+}
+
 // StmtCacheKey represents the key type in the StmtCache.
 type StmtCacheKey int
 
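
A note on Clone above: it copies the SetVars map and the OriginalTableHints slice element-by-element rather than simply copying the struct value, because a plain struct copy would share the underlying map and slice with the original. The self-contained sketch below (illustrative types, not TiDB's code) shows the difference between the two approaches.

package main

import "fmt"

// hints is an illustrative stand-in for StmtHints; only the map field matters here.
type hints struct {
    memQuotaQuery int64
    setVars       map[string]string
}

// shallowClone copies the struct value only, so both copies share the same map.
func (h *hints) shallowClone() *hints {
    cp := *h
    return &cp
}

// deepClone re-allocates the map, matching what Clone above does for SetVars
// (and, via copy(), for the OriginalTableHints slice).
func (h *hints) deepClone() *hints {
    cp := *h
    if len(h.setVars) > 0 {
        cp.setVars = make(map[string]string, len(h.setVars))
        for k, v := range h.setVars {
            cp.setVars[k] = v
        }
    }
    return &cp
}

func main() {
    orig := &hints{memQuotaQuery: 1, setVars: map[string]string{"tidb_mem_quota_query": "1073741824"}}
    shallow := orig.shallowClone()
    deep := orig.deepClone()

    orig.setVars["tidb_mem_quota_query"] = "0" // mutate the original after cloning

    fmt.Println(shallow.setVars["tidb_mem_quota_query"]) // "0": the map is shared
    fmt.Println(deep.setVars["tidb_mem_quota_query"])    // "1073741824": independent copy
}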

sessionctx/stmtctx/stmtctx_test.go

Lines changed: 154 additions & 0 deletions
@@ -17,6 +17,12 @@ package stmtctx_test
 import (
 	"context"
 	"fmt"
+<<<<<<< HEAD
+=======
+	"math/rand"
+	"reflect"
+	"sort"
+>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
 	"testing"
 	"time"
 
@@ -143,3 +149,151 @@ func TestWeakConsistencyRead(t *testing.T) {
 	execAndCheck("execute s", testkit.Rows("1 1 2"), kv.SI)
 	tk.MustExec("rollback")
 }
+<<<<<<< HEAD
+=======
+
+func TestMarshalSQLWarn(t *testing.T) {
+	warns := []stmtctx.SQLWarn{
+		{
+			Level: stmtctx.WarnLevelError,
+			Err:   errors.New("any error"),
+		},
+		{
+			Level: stmtctx.WarnLevelError,
+			Err:   errors.Trace(errors.New("any error")),
+		},
+		{
+			Level: stmtctx.WarnLevelWarning,
+			Err:   variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown"),
+		},
+		{
+			Level: stmtctx.WarnLevelWarning,
+			Err:   errors.Trace(variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown")),
+		},
+	}
+
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	// First query can trigger loading global variables, which produces warnings.
+	tk.MustQuery("select 1")
+	tk.Session().GetSessionVars().StmtCtx.SetWarnings(warns)
+	rows := tk.MustQuery("show warnings").Rows()
+	require.Equal(t, len(warns), len(rows))
+
+	// The unmarshalled result doesn't need to be exactly the same with the original one.
+	// We only need that the results of `show warnings` are the same.
+	bytes, err := json.Marshal(warns)
+	require.NoError(t, err)
+	var newWarns []stmtctx.SQLWarn
+	err = json.Unmarshal(bytes, &newWarns)
+	require.NoError(t, err)
+	tk.Session().GetSessionVars().StmtCtx.SetWarnings(newWarns)
+	tk.MustQuery("show warnings").Check(rows)
+}
+
+func TestApproxRuntimeInfo(t *testing.T) {
+	var n = rand.Intn(19000) + 1000
+	var valRange = rand.Int31n(10000) + 1000
+	backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"}
+	details := []*execdetails.ExecDetails{}
+	for i := 0; i < n; i++ {
+		d := &execdetails.ExecDetails{
+			DetailsNeedP90: execdetails.DetailsNeedP90{
+				CalleeAddress: fmt.Sprintf("%v", i+1),
+				BackoffSleep:  make(map[string]time.Duration),
+				BackoffTimes:  make(map[string]int),
+				TimeDetail: util.TimeDetail{
+					ProcessTime: time.Second * time.Duration(rand.Int31n(valRange)),
+					WaitTime:    time.Millisecond * time.Duration(rand.Int31n(valRange)),
+				},
+			},
+		}
+		details = append(details, d)
+		for _, backoff := range backoffs {
+			d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(rand.Int31n(valRange))
+			d.BackoffTimes[backoff] = rand.Intn(int(valRange))
+		}
+	}
+
+	// Make CalleeAddress for each max value is deterministic.
+	details[rand.Intn(n)].DetailsNeedP90.TimeDetail.ProcessTime = time.Second * time.Duration(valRange)
+	details[rand.Intn(n)].DetailsNeedP90.TimeDetail.WaitTime = time.Millisecond * time.Duration(valRange)
+	for _, backoff := range backoffs {
+		details[rand.Intn(n)].BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(valRange)
+	}
+
+	ctx := new(stmtctx.StatementContext)
+	for i := 0; i < n; i++ {
+		ctx.MergeExecDetails(details[i], nil)
+	}
+	d := ctx.CopTasksDetails()
+
+	require.Equal(t, d.NumCopTasks, n)
+	sort.Slice(details, func(i, j int) bool {
+		return details[i].TimeDetail.ProcessTime.Nanoseconds() < details[j].TimeDetail.ProcessTime.Nanoseconds()
+	})
+	var timeSum time.Duration
+	for _, detail := range details {
+		timeSum += detail.TimeDetail.ProcessTime
+	}
+	require.Equal(t, d.AvgProcessTime, timeSum/time.Duration(n))
+	require.InEpsilon(t, d.P90ProcessTime.Nanoseconds(), details[n*9/10].TimeDetail.ProcessTime.Nanoseconds(), 0.05)
+	require.Equal(t, d.MaxProcessTime, details[n-1].TimeDetail.ProcessTime)
+	require.Equal(t, d.MaxProcessAddress, details[n-1].CalleeAddress)
+
+	sort.Slice(details, func(i, j int) bool {
+		return details[i].TimeDetail.WaitTime.Nanoseconds() < details[j].TimeDetail.WaitTime.Nanoseconds()
+	})
+	timeSum = 0
+	for _, detail := range details {
+		timeSum += detail.TimeDetail.WaitTime
+	}
+	require.Equal(t, d.AvgWaitTime, timeSum/time.Duration(n))
+	require.InEpsilon(t, d.P90WaitTime.Nanoseconds(), details[n*9/10].TimeDetail.WaitTime.Nanoseconds(), 0.05)
+	require.Equal(t, d.MaxWaitTime, details[n-1].TimeDetail.WaitTime)
+	require.Equal(t, d.MaxWaitAddress, details[n-1].CalleeAddress)
+
+	fields := d.ToZapFields()
+	require.Equal(t, 9, len(fields))
+	for _, backoff := range backoffs {
+		sort.Slice(details, func(i, j int) bool {
+			return details[i].BackoffSleep[backoff].Nanoseconds() < details[j].BackoffSleep[backoff].Nanoseconds()
+		})
+		timeSum = 0
+		var timesSum = 0
+		for _, detail := range details {
+			timeSum += detail.BackoffSleep[backoff]
+			timesSum += detail.BackoffTimes[backoff]
+		}
+		require.Equal(t, d.MaxBackoffAddress[backoff], details[n-1].CalleeAddress)
+		require.Equal(t, d.MaxBackoffTime[backoff], details[n-1].BackoffSleep[backoff])
+		require.InEpsilon(t, d.P90BackoffTime[backoff], details[n*9/10].BackoffSleep[backoff], 0.1)
+		require.Equal(t, d.AvgBackoffTime[backoff], timeSum/time.Duration(n))
+
+		require.Equal(t, d.TotBackoffTimes[backoff], timesSum)
+		require.Equal(t, d.TotBackoffTime[backoff], timeSum)
+	}
+}
+
+func TestStmtHintsClone(t *testing.T) {
+	hints := stmtctx.StmtHints{}
+	value := reflect.ValueOf(&hints).Elem()
+	for i := 0; i < value.NumField(); i++ {
+		field := value.Field(i)
+		switch field.Kind() {
+		case reflect.Int, reflect.Int32, reflect.Int64:
+			field.SetInt(1)
+		case reflect.Uint, reflect.Uint32, reflect.Uint64:
+			field.SetUint(1)
+		case reflect.Uint8: // byte
+			field.SetUint(1)
+		case reflect.Bool:
+			field.SetBool(true)
+		case reflect.String:
+			field.SetString("test")
+		default:
+		}
+	}
+	require.Equal(t, hints, *hints.Clone())
+}
+>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
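
TestStmtHintsClone above uses reflection to set every scalar and string field of StmtHints to a non-zero value and then requires the clone to match, so a newly added field that Clone forgets to copy fails the test. A generic, self-contained variant of that guard (illustrative types, not part of this patch) is sketched below.

package main

import (
    "fmt"
    "reflect"
)

// config is an illustrative struct with a hand-written clone, like StmtHints.
type config struct {
    Quota   int64
    Timeout uint64
    Enabled bool
    Name    string
}

func (c *config) clone() *config { cp := *c; return &cp }

// fillNonZero sets each exported scalar or string field of v (a pointer to a
// struct) to a simple non-zero value, mirroring the loop in TestStmtHintsClone.
func fillNonZero(v interface{}) {
    val := reflect.ValueOf(v).Elem()
    for i := 0; i < val.NumField(); i++ {
        f := val.Field(i)
        switch f.Kind() {
        case reflect.Int, reflect.Int32, reflect.Int64:
            f.SetInt(1)
        case reflect.Uint, reflect.Uint8, reflect.Uint32, reflect.Uint64:
            f.SetUint(1)
        case reflect.Bool:
            f.SetBool(true)
        case reflect.String:
            f.SetString("x")
        }
    }
}

func main() {
    c := &config{}
    fillNonZero(c)
    // true only when clone reproduces every populated field.
    fmt.Println(reflect.DeepEqual(*c, *c.clone()))
}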

0 commit comments
