Skip to content

Commit e6b4d95

Browse files
authored
log restore: fix the bottleneck of btree map (#59896)
close #59900
1 parent b2a9059 commit e6b4d95

File tree

2 files changed

+30
-20
lines changed

2 files changed

+30
-20
lines changed

br/pkg/restore/log_client/client_test.go

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1513,15 +1513,20 @@ func TestLogSplitStrategy(t *testing.T) {
15131513
// Create a split client with the mock PD client.
15141514
client := split.NewClient(mockPDCli, nil, nil, 100, 4)
15151515

1516+
// these files should skip accumulation
1517+
smallFiles := make([]*backuppb.DataFileInfo, 0, 10)
1518+
for j := 0; j < 20; j++ {
1519+
smallFiles = append(smallFiles, fakeFile(1, 100, 1024*1024, 100))
1520+
}
1521+
15161522
// Define a mock iterator with sample data files.
1517-
mockIter := iter.FromSlice([]*backuppb.DataFileInfo{
1518-
fakeFile(1, 100, 100, 100),
1523+
mockIter := iter.FromSlice(append(smallFiles, []*backuppb.DataFileInfo{
15191524
fakeFile(1, 200, 2*units.MiB, 200),
15201525
fakeFile(2, 100, 3*units.MiB, 300),
15211526
fakeFile(3, 100, 10*units.MiB, 100000),
15221527
fakeFile(1, 300, 3*units.MiB, 10),
15231528
fakeFile(1, 400, 4*units.MiB, 10),
1524-
})
1529+
}...))
15251530
logIter := toLogDataFileInfoIter(mockIter)
15261531

15271532
// Initialize a wrapper for the file restorer with a region splitter.
@@ -1548,7 +1553,7 @@ func TestLogSplitStrategy(t *testing.T) {
15481553
count := 0
15491554
for i := helper.TryNext(ctx); !i.Finished; i = helper.TryNext(ctx) {
15501555
require.NoError(t, i.Err)
1551-
if count == expectSplitCount {
1556+
if count == len(smallFiles)+expectSplitCount {
15521557
// Verify that no split occurs initially due to insufficient data.
15531558
regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0)
15541559
require.NoError(t, err)
@@ -1562,6 +1567,9 @@ func TestLogSplitStrategy(t *testing.T) {
15621567
count += 1
15631568
}
15641569

1570+
// iterate 20 small files + 4 valid files
1571+
require.Equal(t, len(smallFiles)+4, count)
1572+
15651573
// Verify that a split occurs on the second region due to excess data.
15661574
regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0)
15671575
require.NoError(t, err)

br/pkg/restore/log_client/log_split_strategy.go

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -63,25 +63,27 @@ func NewLogSplitStrategy(
6363
const splitFileThreshold = 1024 * 1024 // 1 MB
6464

6565
func (ls *LogSplitStrategy) Accumulate(file *LogDataFileInfo) {
66+
// skip accumulating files smaller than 1MB, to prevent too many split & scatter operations
67+
// and to protect the performance of the BTreeMap
6668
if file.Length > splitFileThreshold {
6769
ls.AccumulateCount += 1
68-
}
69-
splitHelper, exist := ls.TableSplitter[file.TableId]
70-
if !exist {
71-
splitHelper = split.NewSplitHelper()
72-
ls.TableSplitter[file.TableId] = splitHelper
73-
}
70+
splitHelper, exist := ls.TableSplitter[file.TableId]
71+
if !exist {
72+
splitHelper = split.NewSplitHelper()
73+
ls.TableSplitter[file.TableId] = splitHelper
74+
}
7475

75-
splitHelper.Merge(split.Valued{
76-
Key: split.Span{
77-
StartKey: file.StartKey,
78-
EndKey: file.EndKey,
79-
},
80-
Value: split.Value{
81-
Size: file.Length,
82-
Number: file.NumberOfEntries,
83-
},
84-
})
76+
splitHelper.Merge(split.Valued{
77+
Key: split.Span{
78+
StartKey: file.StartKey,
79+
EndKey: file.EndKey,
80+
},
81+
Value: split.Value{
82+
Size: file.Length,
83+
Number: file.NumberOfEntries,
84+
},
85+
})
86+
}
8587
}
8688

8789
func (ls *LogSplitStrategy) ShouldSplit() bool {

0 commit comments

Comments (0)