@@ -42,23 +42,18 @@ func init() {
 			return true
 		})
 	}
-	metrics.DDLSetTempIndexWrite = func(connID uint64, tableID int64, opCount uint64, doubleWrite bool) {
+	metrics.DDLAddOneTempIndexWrite = func(connID uint64, tableID int64, doubleWrite bool) {
 		c, _ := coll.write.LoadOrStore(connID, &connIDCollector{
 			tblID2Count: sync.Map{},
 		})
 		//nolint:forcetypeassert
-		tc, _ := c.(*connIDCollector).tblID2Count.LoadOrStore(tableID, &tableCollector{
-			singleWriteCnt:      &atomic.Uint64{},
-			doubleWriteCnt:      &atomic.Uint64{},
-			totalSingleWriteCnt: &atomic.Uint64{},
-			totalDoubleWriteCnt: &atomic.Uint64{},
-		})
+		tc, _ := c.(*connIDCollector).tblID2Count.LoadOrStore(tableID, &tableCollector{})
 		if doubleWrite {
 			//nolint:forcetypeassert
-			tc.(*tableCollector).doubleWriteCnt.Add(opCount)
+			tc.(*tableCollector).doubleWriteCnt.Add(1)
 		} else {
 			//nolint:forcetypeassert
-			tc.(*tableCollector).singleWriteCnt.Add(opCount)
+			tc.(*tableCollector).singleWriteCnt.Add(1)
 		}
 	}
 	metrics.DDLRollbackTempIndexWrite = func(connID uint64) {
@@ -83,77 +78,65 @@ func init() {
 			connIDCollector.tblID2Count.Delete(tblID)
 			return true
 		})
-		coll.merge.Delete(tblID)
-		coll.scan.Delete(tblID)
+		coll.read.Delete(tblID)
 	}
-	metrics.DDLSetTempIndexScan = func(tableID int64, opCount uint64) {
-		c, _ := coll.scan.LoadOrStore(tableID, &atomic.Uint64{})
-		//nolint:forcetypeassert
-		c.(*atomic.Uint64).Add(opCount)
+	metrics.DDLClearTempIndexWrite = func(connID uint64) {
+		coll.write.Delete(connID)
 	}
-	metrics.DDLSetTempIndexMerge = func(tableID int64, opCount uint64) {
-		c, _ := coll.merge.LoadOrStore(tableID, &atomic.Uint64{})
+
+	metrics.DDLSetTempIndexScanAndMerge = func(tableID int64, scanCnt, mergeCnt uint64) {
+		c, _ := coll.read.LoadOrStore(tableID, &mergeAndScan{})
 		//nolint:forcetypeassert
-		c.(*atomic.Uint64).Add(opCount)
+		c.(*mergeAndScan).scan.Add(scanCnt)
+		//nolint:forcetypeassert
+		c.(*mergeAndScan).merge.Add(mergeCnt)
 	}
 }
 
+const (
+	labelSingleWrite = "single_write"
+	labelDoubleWrite = "double_write"
+	labelMerge       = "merge"
+	labelScan        = "scan"
+)
+
 type collector struct {
 	write sync.Map // connectionID => connIDCollector
-	merge sync.Map // tableID => atomic.Uint64
-	scan  sync.Map // tableID => atomic.Uint64
+	read  sync.Map // tableID => mergeAndScan
 
-	singleWriteDesc *prometheus.Desc
-	doubleWriteDesc *prometheus.Desc
-	mergeDesc       *prometheus.Desc
-	scanDesc        *prometheus.Desc
+	desc *prometheus.Desc
+}
+
+type mergeAndScan struct {
+	merge atomic.Uint64
+	scan  atomic.Uint64
 }
 
 type connIDCollector struct {
 	tblID2Count sync.Map // tableID => tableCollector
 }
-
 type tableCollector struct {
-	singleWriteCnt *atomic.Uint64
-	doubleWriteCnt *atomic.Uint64
+	singleWriteCnt atomic.Uint64
+	doubleWriteCnt atomic.Uint64
 
-	totalSingleWriteCnt *atomic.Uint64
-	totalDoubleWriteCnt *atomic.Uint64
+	totalSingleWriteCnt atomic.Uint64
+	totalDoubleWriteCnt atomic.Uint64
 }
 
 func newCollector() *collector {
 	return &collector{
 		write: sync.Map{},
-		merge: sync.Map{},
-		scan:  sync.Map{},
-		singleWriteDesc: prometheus.NewDesc(
-			"tidb_ddl_temp_index_write",
-			"Gauge of temp index write times",
-			[]string{"table_id"}, nil,
-		),
-		doubleWriteDesc: prometheus.NewDesc(
-			"tidb_ddl_temp_index_double_write",
-			"Gauge of temp index double write times",
-			[]string{"table_id"}, nil,
-		),
-		mergeDesc: prometheus.NewDesc(
-			"tidb_ddl_temp_index_merge",
-			"Gauge of temp index merge times.",
-			[]string{"table_id"}, nil,
-		),
-		scanDesc: prometheus.NewDesc(
-			"tidb_ddl_temp_index_scan",
-			"Gauge of temp index scan times.",
-			[]string{"table_id"}, nil,
+		read:  sync.Map{},
+		desc: prometheus.NewDesc(
+			"tidb_ddl_temp_index_op_count",
+			"Gauge of temp index operation count",
+			[]string{"type", "table_id"}, nil,
 		),
 	}
 }
 
 func (c *collector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- c.singleWriteDesc
-	ch <- c.doubleWriteDesc
-	ch <- c.mergeDesc
-	ch <- c.scanDesc
+	ch <- c.desc
 }
 
 func (c *collector) Collect(ch chan<- prometheus.Metric) {
@@ -167,71 +150,56 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 			tableID := tableKey.(int64)
 			//nolint:forcetypeassert
 			tblColl := tableValue.(*tableCollector)
-			if _, exists := singleMap[tableID]; !exists {
-				singleMap[tableID] = 0
-			}
 			singleMap[tableID] += tblColl.totalSingleWriteCnt.Load()
-			if _, exists := doubleMap[tableID]; !exists {
-				doubleMap[tableID] = 0
-			}
 			doubleMap[tableID] += tblColl.totalDoubleWriteCnt.Load()
 			return true
 		})
 		return true
 	})
 	for tableID, cnt := range singleMap {
 		ch <- prometheus.MustNewConstMetric(
-			c.singleWriteDesc,
+			c.desc,
 			prometheus.GaugeValue,
 			float64(cnt),
+			labelSingleWrite,
 			strconv.FormatInt(tableID, 10),
 		)
 	}
 	for tableID, cnt := range doubleMap {
 		ch <- prometheus.MustNewConstMetric(
-			c.doubleWriteDesc,
+			c.desc,
 			prometheus.GaugeValue,
 			float64(cnt),
+			labelDoubleWrite,
 			strconv.FormatInt(tableID, 10),
 		)
 	}
 	mergeMap := make(map[int64]uint64)
-	c.merge.Range(func(key, value any) bool {
+	scanMap := make(map[int64]uint64)
+	c.read.Range(func(key, value any) bool {
 		//nolint:forcetypeassert
 		tableID := key.(int64)
 		//nolint:forcetypeassert
-		opCount := value.(*atomic.Uint64).Load()
-		if _, exists := mergeMap[tableID]; !exists {
-			mergeMap[tableID] = 0
-		}
-		mergeMap[tableID] += opCount
+		ms := value.(*mergeAndScan)
+		mergeMap[tableID] += ms.merge.Load()
+		scanMap[tableID] += ms.scan.Load()
 		return true
 	})
 	for tableID, cnt := range mergeMap {
 		ch <- prometheus.MustNewConstMetric(
-			c.mergeDesc,
+			c.desc,
 			prometheus.GaugeValue,
 			float64(cnt),
+			labelMerge,
 			strconv.FormatInt(tableID, 10),
 		)
 	}
-	scanMap := make(map[int64]uint64)
-	c.scan.Range(func(key, value any) bool {
-		//nolint:forcetypeassert
-		tableID := key.(int64)
-		//nolint:forcetypeassert
-		opCount := value.(*atomic.Uint64).Load()
-		if _, exists := scanMap[tableID]; !exists {
-			scanMap[tableID] = 0
-		}
-		scanMap[tableID] += opCount
-		return true
-	})
 	for tableID, cnt := range scanMap {
 		ch <- prometheus.MustNewConstMetric(
-			c.scanDesc,
+			c.desc,
 			prometheus.GaugeValue,
 			float64(cnt),
+			labelScan,
 			strconv.FormatInt(tableID, 10),
 		)
 	}
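For reference, a minimal, self-contained sketch (not part of this patch; the package layout and sample values are assumptions) of the pattern the reworked Collect relies on: one prometheus.Desc whose per-operation split lives in a "type" label, with label values passed to MustNewConstMetric in the same order as the variable labels declared in NewDesc, which is why the type label now precedes the table ID.

package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// One descriptor with a "type" label stands in for the four
	// per-operation descriptors removed in the diff above.
	desc := prometheus.NewDesc(
		"tidb_ddl_temp_index_op_count",
		"Gauge of temp index operation count",
		[]string{"type", "table_id"}, nil,
	)
	// Label values follow the order of the variable labels in NewDesc:
	// "single_write" binds to "type", the formatted ID binds to "table_id".
	m := prometheus.MustNewConstMetric(
		desc,
		prometheus.GaugeValue,
		42,
		"single_write",
		strconv.FormatInt(101, 10),
	)
	// Dump the resulting sample to show the attached label pairs and value.
	var out dto.Metric
	_ = m.Write(&out)
	fmt.Println(out.GetGauge().GetValue(), out.GetLabel())
}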