Merge pull request redis#1260 from go-redis/fix/txPipelineWriteMulti
Don't allocate tmp slice in txPipelineWriteMulti
vmihailenco authored Feb 14, 2020
2 parents c01b1dc + 218b17f commit 726f680
Showing 11 changed files with 486 additions and 84 deletions.
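
The headline optimization itself lives in files this excerpt truncates, but its consumer side is visible in cluster.go below: transactional pipeline commands now arrive already wrapped in MULTI/EXEC (note the cmds[0].(*StatusCmd) assertion and the "Trim multi and exec" slicing in _processTxPipelineNode), and writers stream the slice directly through the new writeCmds helper instead of the variadic writeCmd(wr, cmds...). A minimal before/after sketch of the idea; the old txPipelineWriteMulti body and the writeCmds implementation shown here are assumptions, not code from this diff:

	// Before (sketch): every write of a transactional pipeline allocated a
	// temporary slice just to wrap the user's commands in MULTI/EXEC.
	func txPipelineWriteMulti(wr *proto.Writer, cmds []Cmder) error {
		multiExec := make([]Cmder, 0, len(cmds)+2) // per-write allocation
		multiExec = append(multiExec, NewStatusCmd("multi"))
		multiExec = append(multiExec, cmds...)
		multiExec = append(multiExec, NewSliceCmd("exec"))
		return writeCmd(wr, multiExec...)
	}

	// After (sketch): the batch is wrapped once, upstream, and every writer
	// (including retries) streams the already-wrapped slice as-is.
	func writeCmds(wr *proto.Writer, cmds []Cmder) error {
		for _, cmd := range cmds {
			if err := writeCmd(wr, cmd); err != nil {
				return err
			}
		}
		return nil
	}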
bench_test.go — 6 changes: 3 additions & 3 deletions
@@ -254,7 +254,7 @@ func BenchmarkClusterPing(b *testing.B) {
 	}
 	defer stopCluster(cluster)
 
-	client := cluster.clusterClient(redisClusterOptions())
+	client := cluster.newClusterClient(redisClusterOptions())
 	defer client.Close()
 
 	b.ResetTimer()
@@ -280,7 +280,7 @@ func BenchmarkClusterSetString(b *testing.B) {
 	}
 	defer stopCluster(cluster)
 
-	client := cluster.clusterClient(redisClusterOptions())
+	client := cluster.newClusterClient(redisClusterOptions())
 	defer client.Close()
 
 	value := string(bytes.Repeat([]byte{'1'}, 10000))
@@ -308,7 +308,7 @@ func BenchmarkClusterReloadState(b *testing.B) {
 	}
 	defer stopCluster(cluster)
 
-	client := cluster.clusterClient(redisClusterOptions())
+	client := cluster.newClusterClient(redisClusterOptions())
 	defer client.Close()
 
 	b.ResetTimer()
cluster.go — 102 changes: 64 additions & 38 deletions
@@ -773,13 +773,13 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 
 		if ask {
 			pipe := node.Client.Pipeline()
-			_ = pipe.Process(NewCmd("ASKING"))
+			_ = pipe.Process(NewCmd("asking"))
 			_ = pipe.Process(cmd)
 			_, lastErr = pipe.ExecContext(ctx)
 			_ = pipe.Close()
 			ask = false
 		} else {
-			lastErr = node.Client._process(ctx, cmd)
+			lastErr = node.Client.ProcessContext(ctx, cmd)
 		}
 
 		// If there is no error - we are done.
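
Two details worth noting in this hunk: the redirect command is now spelled lowercase ("asking"), matching how go-redis normalizes command names internally (Redis treats command names case-insensitively on the wire), and the non-ASK retry path goes through the public ProcessContext rather than the private _process, so the per-node client's hooks also run for commands retried against another node. The pipeline paths below get the same hook-aware treatment.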
@@ -840,6 +840,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	for _, master := range state.Masters {
 		wg.Add(1)
 		go func(node *clusterNode) {
@@ -853,6 +854,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 			}
 		}(master)
 	}
+
 	wg.Wait()
 
 	select {
@@ -873,6 +875,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	for _, slave := range state.Slaves {
 		wg.Add(1)
 		go func(node *clusterNode) {
@@ -886,6 +889,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 			}
 		}(slave)
 	}
+
 	wg.Wait()
 
 	select {
@@ -906,6 +910,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	worker := func(node *clusterNode) {
 		defer wg.Done()
 		err := fn(node.Client)
@@ -927,6 +932,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 	}
 
 	wg.Wait()
+
 	select {
 	case err := <-errCh:
 		return err
@@ -1068,18 +1074,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
 
-			err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return writeCmd(wr, cmds...)
-				})
-				if err != nil {
-					return err
-				}
-
-				return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
-					return c.pipelineReadCmds(node, rd, cmds, failedCmds)
-				})
-			})
+			err := c._processPipelineNode(ctx, node, cmds, failedCmds)
 			if err == nil {
 				return
 			}
@@ -1142,6 +1137,25 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
 	return true
 }
 
+func (c *ClusterClient) _processPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+			})
+		})
+	})
+}
+
 func (c *ClusterClient) pipelineReadCmds(
 	node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
 ) error {
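
The extracted _processPipelineNode (and _processTxPipelineNode further down) wraps each node's write/read cycle in node.Client.hooks.processPipeline, so pipeline hooks on the per-node clients now fire once per shard with exactly the commands routed there. A sketch of observing this, assuming go-redis v7's Hook interface and the ClusterOptions.NewClient factory; timingHook is a hypothetical name:

	package main

	import (
		"context"
		"log"

		"github.com/go-redis/redis/v7"
	)

	type timingHook struct{}

	func (timingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
		return ctx, nil
	}

	func (timingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error { return nil }

	func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
		// With this commit, a cluster pipeline reaches this hook once per
		// node, carrying only the commands mapped to that node's slots.
		log.Printf("pipeline: %d command(s)", len(cmds))
		return ctx, nil
	}

	func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
		return nil
	}

	func main() {
		rdb := redis.NewClusterClient(&redis.ClusterOptions{
			Addrs: []string{":7000", ":7001", ":7002"},
			NewClient: func(opt *redis.Options) *redis.Client {
				node := redis.NewClient(opt)
				node.AddHook(timingHook{}) // per-node hooks see pipeline traffic
				return node
			},
		})
		defer rdb.Close()

		pipe := rdb.Pipeline()
		pipe.Set("key{a}", "1", 0)
		pipe.Get("key{a}")
		_, _ = pipe.Exec()
	}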
@@ -1186,7 +1200,7 @@ func (c *ClusterClient) checkMovedErr(
 	}
 
 	if ask {
-		failedCmds.Add(node, NewCmd("ASKING"), cmd)
+		failedCmds.Add(node, NewCmd("asking"), cmd)
 		return true
 	}
 
@@ -1243,26 +1257,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
 
-			err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return txPipelineWriteMulti(wr, cmds)
-				})
-				if err != nil {
-					return err
-				}
-
-				return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
-					err := c.txPipelineReadQueued(rd, cmds, failedCmds)
-					if err != nil {
-						moved, ask, addr := isMovedError(err)
-						if moved || ask {
-							return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
-						}
-						return err
-					}
-					return pipelineReadCmds(rd, cmds)
-				})
-			})
+			err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
 			if err == nil {
 				return
 			}
@@ -1296,11 +1291,42 @@ func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
 	return cmdsMap
 }
 
+func (c *ClusterClient) _processTxPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				statusCmd := cmds[0].(*StatusCmd)
+				// Trim multi and exec.
+				cmds = cmds[1 : len(cmds)-1]
+
+				err := c.txPipelineReadQueued(rd, statusCmd, cmds, failedCmds)
+				if err != nil {
+					moved, ask, addr := isMovedError(err)
+					if moved || ask {
+						return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+					}
+					return err
+				}
+
+				return pipelineReadCmds(rd, cmds)
+			})
+		})
+	})
+}
+
 func (c *ClusterClient) txPipelineReadQueued(
-	rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+	rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap,
 ) error {
 	// Parse queued replies.
-	var statusCmd StatusCmd
 	if err := statusCmd.readReply(rd); err != nil {
 		return err
 	}
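
Threading statusCmd in as a parameter reuses the *StatusCmd that heads the wrapped batch instead of allocating a zero-value StatusCmd per call: on the wire a transaction of two commands exchanges MULTI → +OK, SET → +QUEUED, GET → +QUEUED, EXEC → an array of replies, and the single reused statusCmd is enough to consume the +OK and every +QUEUED line before pipelineReadCmds reads the EXEC replies into the trimmed user commands.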
@@ -1352,7 +1378,7 @@ func (c *ClusterClient) cmdsMoved(
 
 	if ask {
 		for _, cmd := range cmds {
-			failedCmds.Add(node, NewCmd("ASKING"), cmd)
+			failedCmds.Add(node, NewCmd("asking"), cmd)
 		}
 		return nil
 	}
(Diff for the remaining 9 of the 11 changed files is not shown here.)
