Skip to content

Commit

Permalink
make queries with a limit run with a single degree of parallelism (matrixorigin#18803)
Browse files Browse the repository at this point in the history
make queries with a limit run with a single degree of parallelism

Approved by: @ouyuanning, @aunjgr, @triump2020, @sukki37
  • Loading branch information
badboynt1 committed Sep 19, 2024
1 parent 42a7c45 commit 75ae7b4
Show file tree
Hide file tree
Showing 4 changed files with 15 additions and 9 deletions.
3 changes: 3 additions & 0 deletions pkg/sql/compile/compile.go
Original file line number Diff line number Diff line change
Expand Up @@ -2403,6 +2403,9 @@ func (c *Compile) compileTableScanDataSource(s *Scope) error {
s.DataSource.FilterExpr = filterExpr
s.DataSource.RuntimeFilterSpecs = n.RuntimeFilterProbeList
s.DataSource.OrderBy = n.OrderBy
if len(n.OrderBy) > 0 || n.Limit != nil {
s.DataSource.hasLimit = true
}

return nil
}
Expand Down
14 changes: 7 additions & 7 deletions pkg/sql/compile/scope.go
Original file line number Diff line number Diff line change
Expand Up @@ -516,7 +516,7 @@ func buildScanParallelRun(s *Scope, c *Compile) (*Scope, error) {

// determined how many cpus we should use.
blkSlice := objectio.BlockInfoSlice(s.NodeInfo.Data)
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len())
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len(), s.DataSource.hasLimit)

readers, err = c.e.NewBlockReader(
ctx, scanUsedCpuNumber,
Expand All @@ -530,10 +530,10 @@ func buildScanParallelRun(s *Scope, c *Compile) (*Scope, error) {
switch s.NodeInfo.Rel.GetEngineType() {
case engine.Disttae:
blkSlice := objectio.BlockInfoSlice(s.NodeInfo.Data)
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len())
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len(), s.DataSource.hasLimit)
case engine.Memory:
idSlice := memoryengine.ShardIdSlice(s.NodeInfo.Data)
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, idSlice.Len())
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, idSlice.Len(), s.DataSource.hasLimit)
default:
scanUsedCpuNumber = 1
}
Expand Down Expand Up @@ -608,10 +608,10 @@ func buildScanParallelRun(s *Scope, c *Compile) (*Scope, error) {
switch rel.GetEngineType() {
case engine.Disttae:
blkSlice := objectio.BlockInfoSlice(s.NodeInfo.Data)
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len())
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, blkSlice.Len(), s.DataSource.hasLimit)
case engine.Memory:
idSlice := memoryengine.ShardIdSlice(s.NodeInfo.Data)
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, idSlice.Len())
scanUsedCpuNumber = DetermineRuntimeDOP(maxProvidedCpuNumber, idSlice.Len(), s.DataSource.hasLimit)
default:
scanUsedCpuNumber = 1
}
Expand Down Expand Up @@ -733,8 +733,8 @@ func buildScanParallelRun(s *Scope, c *Compile) (*Scope, error) {
return mergeFromParallelScanScope, nil
}

func DetermineRuntimeDOP(cpunum, blocks int) int {
if cpunum <= 0 || blocks <= 16 {
func DetermineRuntimeDOP(cpunum, blocks int, haslimit bool) int {
if cpunum <= 0 || blocks <= 16 || haslimit {
return 1
}
ret := blocks/16 + 1
Expand Down
3 changes: 2 additions & 1 deletion pkg/sql/compile/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,8 @@ const (

// Source contains information of a relation which will be used in execution.
type Source struct {
isConst bool
isConst bool
hasLimit bool

PushdownId uint64
PushdownAddr string
Expand Down
4 changes: 3 additions & 1 deletion pkg/vm/engine/disttae/tools.go
Original file line number Diff line number Diff line change
Expand Up @@ -1520,7 +1520,9 @@ func distributeBlocksToBlockReaders(rds []*blockReader, numOfReaders int, numOfB
}
}
scanType := NORMAL
if numOfBlocks < numOfReaders*SMALLSCAN_THRESHOLD {
if numOfReaders == 1 {
scanType = SMALL
} else if numOfBlocks < numOfReaders*SMALLSCAN_THRESHOLD {
scanType = SMALL
} else if (numOfReaders * LARGESCAN_THRESHOLD) <= numOfBlocks {
scanType = LARGE
Expand Down

0 comments on commit 75ae7b4

Please sign in to comment.