ddl: Corrected index management during REORGANIZE PARTITION #56786
Changes from all commits
@@ -2185,9 +2185,7 @@ func (w *worker) rollbackLikeDropPartition(jobCtx *jobContext, job *model.Job) (
var dropIndices []*model.IndexInfo
for _, indexInfo := range tblInfo.Indices {
if indexInfo.Unique &&
indexInfo.State == model.StateDeleteReorganization &&
tblInfo.Partition.DDLState == model.StateDeleteReorganization {
if indexInfo.State == model.StateWriteOnly {
dropIndices = append(dropIndices, indexInfo)
}
}

@@ -3043,9 +3041,6 @@ func (w *worker) onExchangeTablePartition(jobCtx *jobContext, job *model.Job) (v
}

func getNewGlobal(partInfo *model.PartitionInfo, idx *model.IndexInfo) bool {
if len(partInfo.DDLUpdateIndexes) == 0 {
return idx.Global
}
for _, newIdx := range partInfo.DDLUpdateIndexes {
if strings.EqualFold(idx.Name.L, newIdx.IndexName) {
return newIdx.Global

@@ -3151,6 +3146,9 @@ func getReorgPartitionInfo(t *meta.Mutator, job *model.Job, args *model.TablePar
//
// Everything now looks as it should, no memory of old partitions/indexes,
// and no more double writing, since the previous state is only reading the new partitions/indexes.
//
// Note: Special handling is also required in tables.newPartitionedTable(),
// to get per partition indexes in the right state.
func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) {
args, err := model.GetTablePartitionArgs(job)
if err != nil {

@@ -3262,39 +3260,33 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
if err != nil {
return ver, errors.Trace(err)
}
if !inAllPartitionColumns {
// Currently only support Explicit Global indexes.
if !newGlobal {
job.State = model.JobStateCancelled
return ver, dbterror.ErrGlobalIndexNotExplicitlySet.GenWithStackByArgs(index.Name.O)
}
// Duplicate the unique indexes with new index ids.
// If previously was Global or will be Global:
// it must be recreated with new index ID
// TODO: Could we allow that session in StateWriteReorganization, when StateDeleteReorganization
// has started, may not find changes through the global index that sessions in StateDeleteReorganization made?
// If so, then we could avoid copying the full Global Index if it has not changed from LOCAL!
// It might be possible to use the new, not yet public partitions to access those rows?!
// Just that it would not work with explicit partition select SELECT FROM t PARTITION (p,...)
newIndex := index.Clone()
newIndex.State = model.StateDeleteOnly
newIndex.ID = AllocateIndexID(tblInfo)
newIndex.Global = true
tblInfo.Indices = append(tblInfo.Indices, newIndex)
} else {
if newGlobal {
// TODO: For the future loosen this restriction and allow global indexes for unique keys also including all partitioning columns
return ver, dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("PARTITION BY, index '%v' is unique and contains all partitioning columns, but has Global Index set", index.Name.O))
}
if index.Global {
// Index was previously Global, now it needs to be duplicated and become a local index.
newIndex := index.Clone()
newIndex.State = model.StateDeleteOnly
newIndex.ID = AllocateIndexID(tblInfo)
newIndex.Global = false
tblInfo.Indices = append(tblInfo.Indices, newIndex)
}
// Currently only support Explicit Global indexes.
if !inAllPartitionColumns && !newGlobal {
job.State = model.JobStateCancelled
return ver, dbterror.ErrGlobalIndexNotExplicitlySet.GenWithStackByArgs(index.Name.O)
}
if !index.Global && !newGlobal {
// still local index, no need to duplicate index.
continue
}
if tblInfo.Partition.DDLChangedIndex == nil {
tblInfo.Partition.DDLChangedIndex = make(map[int64]bool)
}
// Duplicate the unique indexes with new index ids.
// If previously was Global or will be Global:
// it must be recreated with new index ID
// TODO: Could we allow that session in StateWriteReorganization, when StateDeleteReorganization
// has started, may not find changes through the global index that sessions in StateDeleteReorganization made?
// If so, then we could avoid copying the full Global Index if it has not changed from LOCAL!
// It might be possible to use the new, not yet public partitions to access those rows?!
// Just that it would not work with explicit partition select SELECT FROM t PARTITION (p,...)
newIndex := index.Clone()
newIndex.State = model.StateDeleteOnly
newIndex.ID = AllocateIndexID(tblInfo)
tblInfo.Partition.DDLChangedIndex[index.ID] = false
tblInfo.Partition.DDLChangedIndex[newIndex.ID] = true
newIndex.Global = newGlobal
tblInfo.Indices = append(tblInfo.Indices, newIndex)
}
failpoint.Inject("reorgPartCancel1", func(val failpoint.Value) {
if val.(bool) {

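To make the DDLChangedIndex bookkeeping in the hunk above easier to follow, here is a minimal, self-contained sketch, not the real TiDB model or worker code (the types and values below are simplified stand-ins): the original index ID is registered with false, its clone with true, and a later state consults the map to decide which copy to promote to public and which to demote to write-only.

package main

import "fmt"

// Simplified stand-ins for the schema-state machinery touched by the diff above.
type SchemaState int

const (
	StateWriteReorganization SchemaState = iota
	StateDeleteOnly
	StateWriteOnly
	StatePublic
)

type IndexInfo struct {
	ID     int64
	Global bool
	State  SchemaState
}

func main() {
	// DDLChangedIndex maps index ID -> "is this the new copy?".
	ddlChangedIndex := map[int64]bool{}

	oldIdx := &IndexInfo{ID: 1, Global: false, State: StatePublic}
	newIdx := &IndexInfo{ID: 2, Global: true, State: StateDeleteOnly} // cloned copy starts in delete-only

	ddlChangedIndex[oldIdx.ID] = false // original, kept writable for a possible rollback
	ddlChangedIndex[newIdx.ID] = true  // replacement, will eventually become public

	// A later state consults the map to decide how to advance each index.
	for _, idx := range []*IndexInfo{oldIdx, newIdx} {
		isNew, touched := ddlChangedIndex[idx.ID]
		if !touched {
			continue // index not affected by this reorganize job
		}
		if isNew {
			idx.State = StatePublic // promote the replacement
		} else {
			idx.State = StateWriteOnly // demote the original, still double-written
		}
		fmt.Printf("index %d: isNew=%v state=%v\n", idx.ID, isNew, idx.State)
	}
}
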
@@ -3487,26 +3479,18 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
if !index.Unique {
continue
}
switch index.State {
case model.StateWriteReorganization:
isNew, ok := tblInfo.Partition.DDLChangedIndex[index.ID]
if !ok {
continue
}
if isNew {
// Newly created index, replacing old unique/global index
index.State = model.StatePublic
case model.StatePublic:
if index.Global {
// Mark the old global index as non-readable, and to be dropped
index.State = model.StateDeleteReorganization
} else {
inAllPartitionColumns, err := checkPartitionKeysConstraint(partInfo, index.Columns, tblInfo)
if err != nil {
return rollbackReorganizePartitionWithErr(jobCtx, job, err)
}
if !inAllPartitionColumns {
// Mark the old unique index as non-readable, and to be dropped,
// since it is replaced by a global index
index.State = model.StateDeleteReorganization
}
}
continue
}
// Old index, should not be visible any longer,
// but needs to be kept up-to-date in case rollback happens.
index.State = model.StateWriteOnly
}
firstPartIdx, lastPartIdx, idMap, err2 := getReplacedPartitionIDs(partNames, tblInfo.Partition)
if err2 != nil {

@@ -3563,14 +3547,18 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
var dropIndices []*model.IndexInfo
for _, indexInfo := range tblInfo.Indices {
if indexInfo.Unique && indexInfo.State == model.StateDeleteReorganization {
if indexInfo.Unique && indexInfo.State == model.StateWriteOnly {
// Drop the old unique (possible global) index, see onDropIndex
indexInfo.State = model.StateNone
DropIndexColumnFlag(tblInfo, indexInfo)
RemoveDependentHiddenColumns(tblInfo, indexInfo)
dropIndices = append(dropIndices, indexInfo)
}
}
// TODO: verify that the indexes are dropped,
// and that StateDeleteOnly+StateDeleteReorganization is not needed.
Review comment: Tests show that at least the old partitions can be written (double write in case we need to rollback), so we need to add a new state to make sure all clients only use (read and write) the new partitions and new indexes. This should not be fixed in this PR, but in a new PR for fixing #56819.
// local indexes is not an issue, since they will be gone with the dropped
// partitions, but replaced global indexes should be checked!
for _, indexInfo := range dropIndices {
removeIndexInfo(tblInfo, indexInfo)
}

@@ -3632,6 +3620,9 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
failpoint.Return(ver, errors.New("Injected error by reorgPartFail5"))
}
})
failpoint.Inject("updateVersionAndTableInfoErrInStateDeleteReorganization", func() {
failpoint.Return(ver, errors.New("Injected error in StateDeleteReorganization"))
})
args.OldPhysicalTblIDs = physicalTableIDs
args.NewPartitionIDs = newIDs
ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)

Reviewer comment: Why only drop the write-only indexes? What about other states? For example, what will happen when we roll back from the "none -> delete-only" step? There may be delete-only indexes before converting to a rolling-back job.
Reply: There should only be either StatePublic or StateWriteOnly, so only write-only indexes need to be removed. Also, the follow-ups #57114 and #56974 (based on this PR) will block the rollback here and here for the last state. One can see this PR as part 1 of 3, where the optimistic path works; the failure/rollback/cleanup is fixed in parts 2 and 3.
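To make the invariant from the reply concrete, here is a small illustrative sketch, again with simplified stand-in types rather than the real rollbackLikeDropPartition code: indexes left in write-only state by the reorganize job are treated as the duplicated copies and collected for dropping, while the public originals are kept.

package main

import "fmt"

// Simplified stand-ins; the real rollback logic lives in rollbackLikeDropPartition.
type SchemaState int

const (
	StatePublic SchemaState = iota
	StateWriteOnly
)

type IndexInfo struct {
	Name   string
	Unique bool
	State  SchemaState
}

// collectRollbackDrops mirrors the idea discussed above: touched indexes are
// expected to be either public (the original, kept) or write-only (the new
// copy, dropped on rollback).
func collectRollbackDrops(indices []*IndexInfo) []*IndexInfo {
	var dropIndices []*IndexInfo
	for _, idx := range indices {
		if idx.State == StateWriteOnly {
			dropIndices = append(dropIndices, idx)
		}
	}
	return dropIndices
}

func main() {
	indices := []*IndexInfo{
		{Name: "uk_orig", Unique: true, State: StatePublic},    // original, kept
		{Name: "uk_new", Unique: true, State: StateWriteOnly},  // new copy, dropped on rollback
		{Name: "idx_plain", Unique: false, State: StatePublic}, // non-unique, untouched by the job
	}
	for _, idx := range collectRollbackDrops(indices) {
		fmt.Println("drop:", idx.Name)
	}
}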