@@ -773,13 +773,13 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 
 		if ask {
 			pipe := node.Client.Pipeline()
-			_ = pipe.Process(NewCmd("ASKING"))
+			_ = pipe.Process(NewCmd("asking"))
 			_ = pipe.Process(cmd)
 			_, lastErr = pipe.ExecContext(ctx)
 			_ = pipe.Close()
 			ask = false
 		} else {
-			lastErr = node.Client._process(ctx, cmd)
+			lastErr = node.Client.ProcessContext(ctx, cmd)
 		}
 
 		// If there is no error - we are done.
@@ -840,6 +840,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	for _, master := range state.Masters {
 		wg.Add(1)
 		go func(node *clusterNode) {
@@ -853,6 +854,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 			}
 		}(master)
 	}
+
 	wg.Wait()
 
 	select {
@@ -873,6 +875,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	for _, slave := range state.Slaves {
 		wg.Add(1)
 		go func(node *clusterNode) {
@@ -886,6 +889,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 			}
 		}(slave)
 	}
+
 	wg.Wait()
 
 	select {
@@ -906,6 +910,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
+
 	worker := func(node *clusterNode) {
 		defer wg.Done()
 		err := fn(node.Client)
@@ -927,6 +932,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 	}
 
 	wg.Wait()
+
 	select {
 	case err := <-errCh:
 		return err
@@ -1068,18 +1074,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
 
-			err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return writeCmd(wr, cmds...)
-				})
-				if err != nil {
-					return err
-				}
-
-				return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
-					return c.pipelineReadCmds(node, rd, cmds, failedCmds)
-				})
-			})
+			err := c._processPipelineNode(ctx, node, cmds, failedCmds)
 			if err == nil {
 				return
 			}
@@ -1142,6 +1137,25 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
 	return true
 }
 
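+// _processPipelineNode writes one node's batch of pipelined commands and
+// reads the replies, running the node client's pipeline hooks around the
+// round trip. Commands that come back with MOVED/ASK errors are queued on
+// failedCmds for the next attempt.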
+func (c *ClusterClient) _processPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+			})
+		})
+	})
+}
+
 func (c *ClusterClient) pipelineReadCmds(
 	node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
 ) error {
@@ -1186,7 +1200,7 @@ func (c *ClusterClient) checkMovedErr(
 	}
 
 	if ask {
-		failedCmds.Add(node, NewCmd("ASKING"), cmd)
+		failedCmds.Add(node, NewCmd("asking"), cmd)
 		return true
 	}
 
@@ -1243,26 +1257,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
 
-			err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return txPipelineWriteMulti(wr, cmds)
-				})
-				if err != nil {
-					return err
-				}
-
-				return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
-					err := c.txPipelineReadQueued(rd, cmds, failedCmds)
-					if err != nil {
-						moved, ask, addr := isMovedError(err)
-						if moved || ask {
-							return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
-						}
-						return err
-					}
-					return pipelineReadCmds(rd, cmds)
-				})
-			})
+			err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
 			if err == nil {
 				return
 			}
@@ -1296,11 +1291,42 @@ func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
 	return cmdsMap
 }
 
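+// _processTxPipelineNode runs one node's share of a transactional
+// (MULTI/EXEC) pipeline, wrapping the round trip in the node client's
+// transaction pipeline hooks. MOVED/ASK redirections are queued on
+// failedCmds so the caller can retry them on the right node.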
+func (c *ClusterClient) _processTxPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
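+				// cmds still includes the wrapping MULTI and EXEC here;
+				// reuse MULTI's StatusCmd to parse the queued replies.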
+				statusCmd := cmds[0].(*StatusCmd)
+				// Trim multi and exec.
+				cmds = cmds[1 : len(cmds)-1]
+
+				err := c.txPipelineReadQueued(rd, statusCmd, cmds, failedCmds)
+				if err != nil {
+					moved, ask, addr := isMovedError(err)
+					if moved || ask {
+						return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+					}
+					return err
+				}
+
+				return pipelineReadCmds(rd, cmds)
+			})
+		})
+	})
+}
+
 func (c *ClusterClient) txPipelineReadQueued(
-	rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+	rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap,
 ) error {
 	// Parse queued replies.
-	var statusCmd StatusCmd
 	if err := statusCmd.readReply(rd); err != nil {
 		return err
 	}
@@ -1352,7 +1378,7 @@ func (c *ClusterClient) cmdsMoved(
 
 	if ask {
 		for _, cmd := range cmds {
-			failedCmds.Add(node, NewCmd("ASKING"), cmd)
+			failedCmds.Add(node, NewCmd("asking"), cmd)
 		}
 		return nil
 	}