1 change: 1 addition & 0 deletions pkg/metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ const (
LabelGCWorker = "gcworker"
LabelAnalyze = "analyze"
LabelWorkerPool = "worker-pool"
LabelStats = "stats"

LabelBatchRecvLoop = "batch-recv-loop"
LabelBatchSendLoop = "batch-send-loop"
Expand Down
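Why a new label constant: the guard calls added later in this PR pass a label such as metrics.LabelStats or metrics.LabelAnalyze into the recover helper, and that label presumably ends up as the value on a per-component panic counter. A minimal sketch of that pattern, assuming a Prometheus counter vector; the counter name, namespace, and label key below are illustrative, not the exact definitions in pkg/metrics:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // Illustrative counter: one time series per component label, e.g. "stats".
    var panicCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
        Namespace: "tidb",
        Subsystem: "server",
        Name:      "panic_total",
        Help:      "Counter of recovered panics, labeled by component.",
    }, []string{"type"})

    func main() {
        prometheus.MustRegister(panicCounter)
        // A recover helper would do this with the label it was given,
        // e.g. metrics.LabelStats ("stats") for the statistics subsystem.
        panicCounter.WithLabelValues("stats").Inc()
    }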
1 change: 1 addition & 0 deletions pkg/statistics/handle/autoanalyze/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ go_library(
"//pkg/domain/infosync",
"//pkg/infoschema",
"//pkg/meta/model",
"//pkg/metrics",
"//pkg/parser/model",
"//pkg/parser/terror",
"//pkg/sessionctx",
Expand Down
4 changes: 4 additions & 0 deletions pkg/statistics/handle/autoanalyze/autoanalyze.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ import (
"github.com/pingcap/tidb/pkg/domain/infosync"
"github.com/pingcap/tidb/pkg/infoschema"
"github.com/pingcap/tidb/pkg/meta/model"
"github.com/pingcap/tidb/pkg/metrics"
pmodel "github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/parser/terror"
"github.com/pingcap/tidb/pkg/sessionctx"
Expand Down Expand Up @@ -116,6 +117,7 @@ func (sa *statsAnalyze) FinishAnalyzeJob(job *statistics.AnalyzeJob, failReason

// DeleteAnalyzeJobs deletes the analyze jobs whose update time is earlier than updateTime.
func (sa *statsAnalyze) DeleteAnalyzeJobs(updateTime time.Time) error {
defer util.Recover(metrics.LabelAnalyze, "DeleteAnalyzeJobs", nil, false)
return statsutil.CallWithSCtx(sa.statsHandle.SPool(), func(sctx sessionctx.Context) error {
_, _, err := statsutil.ExecRows(sctx, "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", updateTime.UTC().Format(types.TimeFormat))
return err
Expand All @@ -125,6 +127,7 @@ func (sa *statsAnalyze) DeleteAnalyzeJobs(updateTime time.Time) error {
// CleanupCorruptedAnalyzeJobsOnCurrentInstance cleans up the potentially corrupted analyze job.
// It only cleans up the jobs that are associated with the current instance.
func (sa *statsAnalyze) CleanupCorruptedAnalyzeJobsOnCurrentInstance(currentRunningProcessIDs map[uint64]struct{}) error {
defer util.Recover(metrics.LabelAnalyze, "CleanupCorruptedAnalyzeJobsOnCurrentInstance", nil, false)
return statsutil.CallWithSCtx(sa.statsHandle.SPool(), func(sctx sessionctx.Context) error {
return CleanupCorruptedAnalyzeJobsOnCurrentInstance(sctx, currentRunningProcessIDs)
}, statsutil.FlagWrapTxn)
Expand All @@ -133,6 +136,7 @@ func (sa *statsAnalyze) CleanupCorruptedAnalyzeJobsOnCurrentInstance(currentRunn
// CleanupCorruptedAnalyzeJobsOnDeadInstances removes analyze jobs that may have been corrupted.
// Specifically, it removes jobs associated with instances that no longer exist in the cluster.
func (sa *statsAnalyze) CleanupCorruptedAnalyzeJobsOnDeadInstances() error {
defer util.Recover(metrics.LabelAnalyze, "CleanupCorruptedAnalyzeJobsOnDeadInstances", nil, false)
return statsutil.CallWithSCtx(sa.statsHandle.SPool(), func(sctx sessionctx.Context) error {
return CleanupCorruptedAnalyzeJobsOnDeadInstances(sctx)
}, statsutil.FlagWrapTxn)
Expand Down
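Each of the three exported methods above now begins with defer util.Recover(metrics.LabelAnalyze, "<method name>", nil, false), so a panic inside the background cleanup work is caught instead of taking down the tidb-server process. A minimal, self-contained sketch of how such a deferred guard behaves; it mirrors the call shape seen in the diff (label, function name, optional callback, quit flag) but is not TiDB's pkg/util implementation:

    package main

    import (
        "fmt"
        "log"
    )

    // recoverWithLabel is a stand-in for a Recover-style helper. It must be
    // invoked via defer so that recover() runs while the panicking frame
    // unwinds; it logs the panic under a component label, runs an optional
    // cleanup callback, and re-panics only when quit is true.
    func recoverWithLabel(metricsLabel, funcInfo string, recoverFn func(), quit bool) {
        r := recover()
        if r == nil {
            return
        }
        log.Printf("[%s] %s panicked: %v", metricsLabel, funcInfo, r)
        if recoverFn != nil {
            recoverFn()
        }
        if quit {
            panic(r) // let the process die only when the caller asks for it
        }
    }

    func deleteAnalyzeJobs() error {
        // quit=false: absorb the panic so the periodic task just skips a round.
        defer recoverWithLabel("analyze", "DeleteAnalyzeJobs", nil, false)
        panic("simulated corruption while deleting analyze jobs")
    }

    func main() {
        err := deleteAnalyzeJobs()
        fmt.Println("returned err:", err) // prints <nil>; the panic was absorbed
    }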
2 changes: 2 additions & 0 deletions pkg/statistics/handle/storage/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ go_library(
"//pkg/infoschema",
"//pkg/kv",
"//pkg/meta/model",
"//pkg/metrics",
"//pkg/parser/ast",
"//pkg/parser/model",
"//pkg/parser/mysql",
Expand All @@ -34,6 +35,7 @@ go_library(
"//pkg/statistics/handle/util",
"//pkg/statistics/util",
"//pkg/types",
"//pkg/util",
"//pkg/util/chunk",
"//pkg/util/compress",
"//pkg/util/intest",
Expand Down
3 changes: 3 additions & 0 deletions pkg/statistics/handle/storage/gc.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/infoschema"
"github.com/pingcap/tidb/pkg/metrics"
"github.com/pingcap/tidb/pkg/parser/terror"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
Expand All @@ -32,6 +33,7 @@ import (
"github.com/pingcap/tidb/pkg/statistics/handle/lockstats"
"github.com/pingcap/tidb/pkg/statistics/handle/types"
"github.com/pingcap/tidb/pkg/statistics/handle/util"
tidbutil "github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/sqlexec"
Expand All @@ -55,6 +57,7 @@ func NewStatsGC(statsHandle types.StatsHandle) types.StatsGC {
// For dropped tables, we will first update their version
// so that other tidb could know that table is deleted.
func (gc *statsGCImpl) GCStats(is infoschema.InfoSchema, ddlLease time.Duration) (err error) {
defer tidbutil.Recover(metrics.LabelStats, "GCStats", nil, false)
return util.CallWithSCtx(gc.statsHandle.SPool(), func(sctx sessionctx.Context) error {
return GCStats(sctx, gc.statsHandle, is, ddlLease)
})
Expand Down
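One detail in gc.go: the top-level pkg/util is imported under the alias tidbutil because this file already binds the name util to pkg/statistics/handle/util, so the guard call reads tidbutil.Recover rather than util.Recover. The same aliasing pattern shown with standard-library packages, purely for illustration:

    package main

    import (
        crand "crypto/rand"
        "fmt"
        "math/rand"
    )

    func main() {
        // crypto/rand and math/rand share the base name "rand", so one of
        // them needs a distinct local name, just like tidbutil above.
        buf := make([]byte, 4)
        if _, err := crand.Read(buf); err != nil {
            panic(err)
        }
        fmt.Println(buf, rand.Intn(10))
    }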
2 changes: 2 additions & 0 deletions pkg/statistics/handle/usage/session_stats_collect.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ func (s *statsUsageImpl) needDumpStatsDelta(is infoschema.InfoSchema, dumpAll bo
// DumpStatsDeltaToKV sweeps the whole list and updates the global map, then we dumps every table that held in map to KV.
// If the mode is `DumpDelta`, it will only dump that delta info that `Modify Count / Table Count` greater than a ratio.
func (s *statsUsageImpl) DumpStatsDeltaToKV(dumpAll bool) error {
defer util.Recover(metrics.LabelStats, "DumpStatsDeltaToKV", nil, false)
start := time.Now()
defer func() {
dur := time.Since(start)
Expand Down Expand Up @@ -224,6 +225,7 @@ func (s *statsUsageImpl) dumpTableStatCountToKV(is infoschema.InfoSchema, physic

// DumpColStatsUsageToKV sweeps the whole list, updates the column stats usage map and dumps it to KV.
func (s *statsUsageImpl) DumpColStatsUsageToKV() error {
defer util.Recover(metrics.LabelStats, "DumpColStatsUsageToKV", nil, false)
s.SweepSessionStatsList()
colMap := s.SessionStatsUsage().GetUsageAndReset()
defer func() {
Expand Down
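In DumpStatsDeltaToKV the recover guard is registered before the existing timing defer. Since deferred calls run last-in, first-out, the guard executes last and therefore also covers a panic raised inside the timing closure itself, not just the function body. A small sketch of that ordering, independent of the TiDB code:

    package main

    import "fmt"

    func guarded() {
        defer func() { // registered first, runs last: sees any panic below
            if r := recover(); r != nil {
                fmt.Println("recovered:", r)
            }
        }()
        defer fmt.Println("timing defer still runs while unwinding")
        panic("boom in the function body")
    }

    func main() {
        guarded()
        fmt.Println("process keeps running")
    }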