@@ -1046,7 +1046,7 @@ func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error
 
 func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 	cmdsMap := newCmdsMap()
-	err := c.mapCmdsByNode(cmds, cmdsMap)
+	err := c.mapCmdsByNode(cmdsMap, cmds)
 	if err != nil {
 		setCmdsErr(cmds, err)
 		return err
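The only change in this hunk is the parameter order of mapCmdsByNode: the destination map now comes before the commands being distributed, presumably to match Go's usual destination-before-source convention. A minimal reminder of that convention from the standard library (plain Go, unrelated to go-redis):

package main

import "fmt"

func main() {
	// copy takes the destination first, then the source; the reordered
	// mapCmdsByNode(cmdsMap, cmds) follows the same shape.
	dst := make([]int, 3)
	src := []int{1, 2, 3}
	copy(dst, src)
	fmt.Println(dst) // [1 2 3]
}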
@@ -1080,11 +1080,15 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error
 					return c.pipelineReadCmds(node, rd, cmds, failedCmds)
 				})
 			})
-			if err != nil {
-				err = c.mapCmdsByNode(cmds, failedCmds)
-				if err != nil {
+			if err == nil {
+				return
+			}
+			if attempt < c.opt.MaxRedirects {
+				if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
 					setCmdsErr(cmds, err)
 				}
+			} else {
+				setCmdsErr(cmds, err)
 			}
 		}(node, cmds)
 	}
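The error handling in the per-node goroutine is restructured: a successful pipeline returns immediately; while redirect attempts remain (attempt < MaxRedirects), the failed commands are regrouped by node for the next pass; and only when regrouping itself fails, or the attempt budget is exhausted, is the error stamped onto every command via setCmdsErr. A toy, self-contained model of that retry-and-regroup pattern (an illustration only, not go-redis code):

package main

import "fmt"

func main() {
	const maxRedirects = 3
	work := []string{"GET a", "GET b", "GET c"}

	for attempt := 0; attempt <= maxRedirects; attempt++ {
		var failed []string
		for _, w := range work {
			// Pretend "GET b" hits a MOVED redirect on the first two attempts.
			if attempt < 2 && w == "GET b" {
				failed = append(failed, w)
				continue
			}
			fmt.Printf("attempt %d: ran %q\n", attempt, w)
		}
		if len(failed) == 0 {
			return // success: nothing left to retry
		}
		if attempt == maxRedirects {
			fmt.Println("redirect budget exhausted:", failed)
			return // analogous to setCmdsErr(cmds, err)
		}
		work = failed // analogous to mapCmdsByNode(failedCmds, cmds)
	}
}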
@@ -1099,41 +1103,27 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error
 	return cmdsFirstErr(cmds)
 }
 
-type cmdsMap struct {
-	mu sync.Mutex
-	m  map[*clusterNode][]Cmder
-}
-
-func newCmdsMap() *cmdsMap {
-	return &cmdsMap{
-		m: make(map[*clusterNode][]Cmder),
-	}
-}
-
-func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
-	m.mu.Lock()
-	m.m[node] = append(m.m[node], cmds...)
-	m.mu.Unlock()
-}
-
-func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap *cmdsMap) error {
+func (c *ClusterClient) mapCmdsByNode(cmdsMap *cmdsMap, cmds []Cmder) error {
 	state, err := c.state.Get()
 	if err != nil {
 		return err
 	}
 
-	cmdsAreReadOnly := c.opt.ReadOnly && c.cmdsAreReadOnly(cmds)
+	if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+		for _, cmd := range cmds {
+			slot := c.cmdSlot(cmd)
+			node, err := c.slotReadOnlyNode(state, slot)
+			if err != nil {
+				return err
+			}
+			cmdsMap.Add(node, cmd)
+		}
+		return nil
+	}
+
 	for _, cmd := range cmds {
 		slot := c.cmdSlot(cmd)
-
-		var node *clusterNode
-		var err error
-		if cmdsAreReadOnly {
-			cmdInfo := c.cmdInfo(cmd.Name())
-			node, err = c.cmdNode(cmdInfo, slot)
-		} else {
-			node, err = state.slotMasterNode(slot)
-		}
+		node, err := state.slotMasterNode(slot)
 		if err != nil {
 			return err
 		}
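mapCmdsByNode now has two straight-line paths instead of a per-command branch: when the client was opened with ReadOnly and every command in the batch is read-only, each command is routed to a replica via the new slotReadOnlyNode helper; otherwise every command goes to its slot's master. The options driving this live on ClusterOptions; a minimal setup sketch (the addresses are placeholders and the import path is an assumption; adjust it to the go-redis version you build against):

package main

import "github.com/go-redis/redis/v7" // assumed import path for this era of the code

func main() {
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:         []string{":7000", ":7001", ":7002"}, // placeholder addresses
		ReadOnly:      true, // allow read-only commands on replicas
		RouteRandomly: true, // slotReadOnlyNode then picks a random replica
		MaxRedirects:  3,    // bounds the retry loops in both pipeline paths
	})
	defer rdb.Close()
}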
@@ -1261,7 +1251,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error
 					return err
 				}
 
-				err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
 					err := c.txPipelineReadQueued(rd, cmds, failedCmds)
 					if err != nil {
 						moved, ask, addr := isMovedError(err)
@@ -1272,13 +1262,16 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error
 					}
 					return pipelineReadCmds(rd, cmds)
 				})
-				return err
 			})
-			if err != nil {
-				err = c.mapCmdsByNode(cmds, failedCmds)
-				if err != nil {
+			if err == nil {
+				return
+			}
+			if attempt < c.opt.MaxRedirects {
+				if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
 					setCmdsErr(cmds, err)
 				}
+			} else {
+				setCmdsErr(cmds, err)
 			}
 		}(node, cmds)
 	}
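The transactional pipeline gets the same treatment: WithReader's result is returned directly instead of passing through an intermediate err, and the failure path now mirrors _processPipeline, remapping failed commands only while redirect attempts remain. None of this changes the caller-facing API; a transaction pipeline is still driven through TxPipelined. A usage sketch reusing the rdb client from the earlier snippet (in v7-era code like this, TxPipelined takes no context argument; v8 adds one):

cmds, err := rdb.TxPipelined(func(pipe redis.Pipeliner) error {
	pipe.Set("key", "value", 0) // queued, wrapped in MULTI/EXEC
	pipe.Incr("counter")
	return nil
})
if err != nil {
	// err is the first command error, as produced by cmdsFirstErr.
	panic(err)
}
_ = cmds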
@@ -1561,29 +1554,27 @@ func (c *ClusterClient) cmdNode(cmdInfo *CommandInfo, slot int) (*clusterNode, error) {
 	}
 
 	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
-		if c.opt.RouteByLatency {
-			return state.slotClosestNode(slot)
-		}
-		if c.opt.RouteRandomly {
-			return state.slotRandomNode(slot)
-		}
-		return state.slotSlaveNode(slot)
+		return c.slotReadOnlyNode(state, slot)
 	}
-
 	return state.slotMasterNode(slot)
 }
 
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+	if c.opt.RouteByLatency {
+		return state.slotClosestNode(slot)
+	}
+	if c.opt.RouteRandomly {
+		return state.slotRandomNode(slot)
+	}
+	return state.slotSlaveNode(slot)
+}
+
 func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
 	state, err := c.state.Get()
 	if err != nil {
 		return nil, err
 	}
-
-	nodes := state.slotNodes(slot)
-	if len(nodes) > 0 {
-		return nodes[0], nil
-	}
-	return c.nodes.Random()
+	return state.slotMasterNode(slot)
 }
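Two refactors close out the routing code: the replica-selection policy moves out of cmdNode into slotReadOnlyNode, making its precedence explicit (RouteByLatency wins over RouteRandomly; with neither set, the slot's slave is used), and slotMasterNode now delegates to the clusterState method instead of reimplementing the lookup. A toy distillation of the selection precedence (stand-in names, not go-redis API):

package main

import "fmt"

// pickReplica mirrors slotReadOnlyNode's precedence: latency-based routing
// is checked first, then random routing, then the plain slave fallback.
func pickReplica(routeByLatency, routeRandomly bool) string {
	switch {
	case routeByLatency:
		return "closest node"
	case routeRandomly:
		return "random node"
	default:
		return "slot's slave"
	}
}

func main() {
	fmt.Println(pickReplica(true, true))   // closest node: latency wins
	fmt.Println(pickReplica(false, true))  // random node
	fmt.Println(pickReplica(false, false)) // slot's slave
}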
@@ -1622,3 +1613,22 @@ func remove(ss []string, es ...string) []string {
 	}
 	return ss
 }
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+	mu sync.Mutex
+	m  map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+	return &cmdsMap{
+		m: make(map[*clusterNode][]Cmder),
+	}
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+	m.mu.Lock()
+	m.m[node] = append(m.m[node], cmds...)
+	m.mu.Unlock()
+}
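The cmdsMap type itself is unchanged; it is only relocated from the middle of the pipeline code to the bottom of the file, under a section divider. Its job is to let the per-node goroutines in both pipeline paths append failed commands concurrently. A standalone sketch of the same pattern with stub types (clusterNode and Cmder are internal to go-redis, so stand-ins are used here):

package main

import (
	"fmt"
	"sync"
)

// Stand-ins for go-redis's internal *clusterNode and Cmder.
type node struct{ addr string }
type cmd struct{ name string }

// nodeCmds mirrors cmdsMap: a mutex-guarded map that many goroutines
// can safely append to while grouping commands by target node.
type nodeCmds struct {
	mu sync.Mutex
	m  map[*node][]cmd
}

func (nc *nodeCmds) Add(n *node, cs ...cmd) {
	nc.mu.Lock()
	nc.m[n] = append(nc.m[n], cs...)
	nc.mu.Unlock()
}

func main() {
	nc := &nodeCmds{m: make(map[*node][]cmd)}
	n := &node{addr: ":7000"}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			nc.Add(n, cmd{name: fmt.Sprintf("GET k%d", i)})
		}(i)
	}
	wg.Wait()
	fmt.Println(len(nc.m[n])) // 4
}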