@@ -18,7 +18,6 @@ import (
 	"context"
 	"sync"
 	"sync/atomic"
-	"time"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
@@ -29,7 +28,6 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/mysql"
 	"github.com/pingcap/tidb/pkg/parser/terror"
 	"github.com/pingcap/tidb/pkg/sessionctx"
-	"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/pkg/sessionctx/variable"
 	"github.com/pingcap/tidb/pkg/statistics"
 	"github.com/pingcap/tidb/pkg/statistics/handle/cache"
@@ -117,8 +115,8 @@ func (h *Handle) initStatsMeta(is infoschema.InfoSchema) (util.StatsCache, error
 	return tables, nil
 }
 
-// initStatsHistogramsSQLGen generates the SQL to load all stats_histograms records.
-func initStatsHistogramsSQLGen(isPaging bool) string {
+// genInitStatsHistogramsSQL generates the SQL to load all stats_histograms records.
+func genInitStatsHistogramsSQL(isPaging bool) string {
 	selectPrefix := "select /*+ ORDER_INDEX(mysql.stats_histograms,tbl) */ HIGH_PRIORITY table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, correlation, flag, last_analyze_pos from mysql.stats_histograms"
 	orderSuffix := " order by table_id"
 	if !isPaging {
@@ -311,7 +309,7 @@ func (h *Handle) initStatsHistograms4Chunk(is infoschema.InfoSchema, cache util.
 }
 
 func (h *Handle) initStatsHistogramsLite(is infoschema.InfoSchema, cache util.StatsCache) error {
-	sql := initStatsHistogramsSQLGen(false)
+	sql := genInitStatsHistogramsSQL(false)
 	rc, err := util.Exec(h.initStatsCtx, sql)
 	if err != nil {
 		return errors.Trace(err)
@@ -334,7 +332,7 @@ func (h *Handle) initStatsHistogramsLite(is infoschema.InfoSchema, cache util.St
 }
 
 func (h *Handle) initStatsHistograms(is infoschema.InfoSchema, cache util.StatsCache) error {
-	sql := initStatsHistogramsSQLGen(false)
+	sql := genInitStatsHistogramsSQL(false)
 	rc, err := util.Exec(h.initStatsCtx, sql)
 	if err != nil {
 		return errors.Trace(err)
@@ -371,7 +369,7 @@ func (h *Handle) initStatsHistogramsByPaging(is infoschema.InfoSchema, cache uti
 	}()
 
 	sctx := se.(sessionctx.Context)
-	sql := initStatsHistogramsSQLGen(true)
+	sql := genInitStatsHistogramsSQL(true)
 	rc, err := util.Exec(sctx, sql, task.StartTid, task.EndTid)
 	if err != nil {
 		return errors.Trace(err)
@@ -585,14 +583,13 @@ func (h *Handle) initStatsFMSketch(cache util.StatsCache) error {
 
 func (*Handle) initStatsBuckets4Chunk(cache util.StatsCache, iter *chunk.Iterator4Chunk) {
 	var table *statistics.Table
-	unspecifiedLengthTp := types.NewFieldType(mysql.TypeBlob)
 	var (
 		hasErr        bool
 		failedTableID int64
 		failedHistID  int64
 	)
 	for row := iter.Begin(); row != iter.End(); row = iter.Next() {
-		tableID, isIndex, histID := row.GetInt64(0), row.GetInt64(1), row.GetInt64(2)
+		tableID, histID := row.GetInt64(0), row.GetInt64(1)
 		if table == nil || table.PhysicalID != tableID {
 			if table != nil {
 				for _, index := range table.Indices {
@@ -609,63 +606,13 @@ func (*Handle) initStatsBuckets4Chunk(cache util.StatsCache, iter *chunk.Iterato
 		}
 		var lower, upper types.Datum
 		var hist *statistics.Histogram
-		if isIndex > 0 {
-			index, ok := table.Indices[histID]
-			if !ok {
-				continue
-			}
-			hist = &index.Histogram
-			lower, upper = types.NewBytesDatum(row.GetBytes(5)), types.NewBytesDatum(row.GetBytes(6))
-		} else {
-			column, ok := table.Columns[histID]
-			if !ok {
-				continue
-			}
-			if !mysql.HasPriKeyFlag(column.Info.GetFlag()) {
-				continue
-			}
-			hist = &column.Histogram
-			d := types.NewBytesDatum(row.GetBytes(5))
-			// Setting TimeZone to time.UTC aligns with HistogramFromStorage and can fix #41938. However, #41985 still exist.
-			// TODO: do the correct time zone conversion for timestamp-type columns' upper/lower bounds.
-			sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC)
-			sc.AllowInvalidDate = true
-			sc.IgnoreZeroInDate = true
-			var err error
-			if column.Info.FieldType.EvalType() == types.ETString && column.Info.FieldType.GetType() != mysql.TypeEnum && column.Info.FieldType.GetType() != mysql.TypeSet {
-				// For new collation data, when storing the bounds of the histogram, we store the collate key instead of the
-				// original value.
-				// But there's additional conversion logic for new collation data, and the collate key might be longer than
-				// the FieldType.flen.
-				// If we use the original FieldType here, there might be errors like "Invalid utf8mb4 character string"
-				// or "Data too long".
-				// So we change it to TypeBlob to bypass those logics here.
-				lower, err = d.ConvertTo(sc, unspecifiedLengthTp)
-			} else {
-				lower, err = d.ConvertTo(sc, &column.Info.FieldType)
-			}
-			if err != nil {
-				hasErr = true
-				failedTableID = tableID
-				failedHistID = histID
-				delete(table.Columns, histID)
-				continue
-			}
-			d = types.NewBytesDatum(row.GetBytes(6))
-			if column.Info.FieldType.EvalType() == types.ETString && column.Info.FieldType.GetType() != mysql.TypeEnum && column.Info.FieldType.GetType() != mysql.TypeSet {
-				upper, err = d.ConvertTo(sc, unspecifiedLengthTp)
-			} else {
-				upper, err = d.ConvertTo(sc, &column.Info.FieldType)
-			}
-			if err != nil {
-				hasErr = true
-				failedTableID = tableID
-				failedHistID = histID
-				delete(table.Columns, histID)
-				continue
-			}
+		index, ok := table.Indices[histID]
+		if !ok {
+			continue
 		}
-		hist.AppendBucketWithNDV(&lower, &upper, row.GetInt64(3), row.GetInt64(4), row.GetInt64(7))
+		hist = &index.Histogram
+		lower, upper = types.NewBytesDatum(row.GetBytes(4) /*lower_bound*/), types.NewBytesDatum(row.GetBytes(5) /*upper_bound*/)
+		hist.AppendBucketWithNDV(&lower, &upper, row.GetInt64(2) /*count*/, row.GetInt64(3) /*repeats*/, row.GetInt64(6) /*ndv*/)
 	}
 	if table != nil {
 		cache.Put(table.PhysicalID, table) // put this table in the cache because all statstics of the table have been read.
@@ -685,7 +632,7 @@ func (h *Handle) initStatsBuckets(cache util.StatsCache, totalMemory uint64) err
 			return errors.Trace(err)
 		}
 	} else {
-		sql := "select /*+ ORDER_INDEX(mysql.stats_buckets,tbl)*/ HIGH_PRIORITY table_id, is_index, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where is_index=1 order by table_id, is_index, hist_id, bucket_id"
+		sql := "select /*+ ORDER_INDEX(mysql.stats_buckets,tbl)*/ HIGH_PRIORITY table_id, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where is_index=1 order by table_id, is_index, hist_id, bucket_id"
 		rc, err := util.Exec(h.initStatsCtx, sql)
 		if err != nil {
 			return errors.Trace(err)
@@ -738,7 +685,7 @@ func (h *Handle) initStatsBucketsByPaging(cache util.StatsCache, task initstats.
 		}
 	}()
 	sctx := se.(sessionctx.Context)
-	sql := "select HIGH_PRIORITY table_id, is_index, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where is_index = 1 and table_id >= %? and table_id < %? order by table_id, is_index, hist_id, bucket_id"
+	sql := "select HIGH_PRIORITY table_id, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where is_index = 1 and table_id >= %? and table_id < %? order by table_id, is_index, hist_id, bucket_id"
 	rc, err := util.Exec(sctx, sql, task.StartTid, task.EndTid)
 	if err != nil {
 		return errors.Trace(err)