@@ -71,11 +71,8 @@ type ClusterOptions struct {
 	WriteTimeout          time.Duration
 	ContextTimeoutEnabled bool
 
-	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
-	PoolFIFO bool
-
-	// PoolSize applies per cluster node and not for the whole cluster.
-	PoolSize int
+	PoolFIFO bool
+	PoolSize int // applies per cluster node and not for the whole cluster
 	PoolTimeout  time.Duration
 	MinIdleConns int
 	MaxIdleConns int
@@ -391,6 +388,7 @@ type clusterNodes struct {
 	nodes       map[string]*clusterNode
 	activeAddrs []string
 	closed      bool
+	onNewNode   []func(rdb *Client)
 
 	_generation uint32 // atomic
 }
@@ -426,6 +424,12 @@ func (c *clusterNodes) Close() error {
 	return firstErr
 }
 
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+	c.mu.Lock()
+	c.onNewNode = append(c.onNewNode, fn)
+	c.mu.Unlock()
+}
+
 func (c *clusterNodes) Addrs() ([]string, error) {
 	var addrs []string
 
@@ -503,6 +507,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
 	}
 
 	node = newClusterNode(c.opt, addr)
+	for _, fn := range c.onNewNode {
+		fn(node.Client)
+	}
 
 	c.addrs = appendIfNotExists(c.addrs, addr)
 	c.nodes[addr] = node
@@ -812,18 +819,14 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er
 
 //------------------------------------------------------------------------------
 
-type clusterClient struct {
-	opt           *ClusterOptions
-	nodes         *clusterNodes
-	state         *clusterStateHolder //nolint:structcheck
-	cmdsInfoCache *cmdsInfoCache      //nolint:structcheck
-}
-
 // ClusterClient is a Redis Cluster client representing a pool of zero
 // or more underlying connections. It's safe for concurrent use by
 // multiple goroutines.
 type ClusterClient struct {
-	*clusterClient
+	opt           *ClusterOptions
+	nodes         *clusterNodes
+	state         *clusterStateHolder
+	cmdsInfoCache *cmdsInfoCache
 	cmdable
 	hooks
 }
@@ -834,15 +837,18 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 	opt.init()
 
 	c := &ClusterClient{
-		clusterClient: &clusterClient{
-			opt:   opt,
-			nodes: newClusterNodes(opt),
-		},
+		opt:   opt,
+		nodes: newClusterNodes(opt),
 	}
+
 	c.state = newClusterStateHolder(c.loadState)
 	c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
 	c.cmdable = c.Process
 
+	c.hooks.process = c.process
+	c.hooks.processPipeline = c._processPipeline
+	c.hooks.processTxPipeline = c._processTxPipeline
+
 	return c
 }
 
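The constructor wiring above implies a shape for the embedded `hooks` value. A minimal sketch inferred from the call sites in this diff (not the full type, which also chains user-registered hooks over these slots):

```go
// Sketch only: function-valued slots seeded with the client's own
// implementations in NewClusterClient; inferred from the assignments
// above and from c.hooks.process(ctx, cmd) below.
type hooks struct {
	process           func(ctx context.Context, cmd Cmder) error
	processPipeline   func(ctx context.Context, cmds []Cmder) error
	processTxPipeline func(ctx context.Context, cmds []Cmder) error
}
```

With this shape, registering a hook presumably swaps a slot for a wrapper that closes over the previous value, so the chain always bottoms out at `c.process`, `c._processPipeline`, or `c._processTxPipeline`.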
@@ -873,13 +879,14 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
 }
 
 func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
-	return c.hooks.process(ctx, cmd, c.process)
+	err := c.hooks.process(ctx, cmd)
+	cmd.SetErr(err)
+	return err
 }
 
 func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
 	cmdInfo := c.cmdInfo(ctx, cmd.Name())
 	slot := c.cmdSlot(ctx, cmd)
-
 	var node *clusterNode
 	var ask bool
 	var lastErr error
@@ -899,11 +906,12 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
 	}
 
 	if ask {
+		ask = false
+
 		pipe := node.Client.Pipeline()
 		_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
 		_ = pipe.Process(ctx, cmd)
 		_, lastErr = pipe.Exec(ctx)
-		ask = false
 	} else {
 		lastErr = node.Client.Process(ctx, cmd)
 	}
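Two details in the hunks above: `Process` now calls the composed hook chain without passing a fallback argument (the chain already ends at `c.process` thanks to the constructor wiring) and explicitly mirrors the returned error onto the command with `SetErr`; and the `ask` flag is cleared before issuing the ASKING pipeline rather than after, which keeps the reset adjacent to the check without changing behavior.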
@@ -958,6 +966,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
 	return lastErr
 }
 
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+	c.nodes.OnNewNode(fn)
+}
+
 // ForEachMaster concurrently calls the fn on each master node in the cluster.
 // It returns the first error if any.
 func (c *ClusterClient) ForEachMaster(
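`OnNewNode` lets callers observe every node `*Client` as it is created, e.g. to attach per-node instrumentation. A hypothetical usage sketch; the import path and addresses are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v9" // placeholder import path
)

func main() {
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})

	// Fires once for each node client created after registration;
	// nodes created before the callback was registered are not replayed.
	rdb.OnNewNode(func(node *redis.Client) {
		fmt.Println("new cluster node:", node.Options().Addr)
	})

	_ = rdb.Ping(context.Background()).Err()
}
```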
@@ -1165,7 +1177,7 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
 
 func (c *ClusterClient) Pipeline() Pipeliner {
 	pipe := Pipeline{
-		exec: c.processPipeline,
+		exec: pipelineExecer(c.hooks.processPipeline),
 	}
 	pipe.init()
 	return &pipe
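`pipelineExecer` is defined outside this diff; in go-redis it is a function type, so the new `exec` value is a type conversion of the hook chain rather than a wrapper call:

```go
// From pipeline.go (for reference; unchanged by this commit).
type pipelineExecer func(ctx context.Context, cmds []Cmder) error
```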
@@ -1175,10 +1187,6 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
 	return c.Pipeline().Pipelined(ctx, fn)
 }
 
-func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
-	return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
 func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 	cmdsMap := newCmdsMap()
 
@@ -1258,7 +1266,7 @@ func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool
 func (c *ClusterClient) _processPipelineNode(
 	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
 ) {
-	_ = node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+	_ = node.Client.hooks.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
 		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
 			if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
 				return writeCmds(wr, cmds)
@@ -1344,7 +1352,10 @@ func (c *ClusterClient) checkMovedErr(
 // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
 func (c *ClusterClient) TxPipeline() Pipeliner {
 	pipe := Pipeline{
-		exec: c.processTxPipeline,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			cmds = wrapMultiExec(ctx, cmds)
+			return c.hooks.processTxPipeline(ctx, cmds)
+		},
 	}
 	pipe.init()
 	return &pipe
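`wrapMultiExec` is also outside this hunk. A sketch consistent with how the wrapped slice is consumed below (`cmds[0]` is asserted to `*StatusCmd`, and `_processTxPipeline` trims one command off each end):

```go
// Sketch: bracket the queued commands with MULTI/EXEC so they run
// as a transaction; downstream code trims the pair back off.
func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
	if len(cmds) == 0 {
		panic("not reached")
	}
	wrapped := make([]Cmder, 0, len(cmds)+2)
	wrapped = append(wrapped, NewStatusCmd(ctx, "multi"))
	wrapped = append(wrapped, cmds...)
	wrapped = append(wrapped, NewSliceCmd(ctx, "exec"))
	return wrapped
}
```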
@@ -1354,10 +1365,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
 	return c.TxPipeline().Pipelined(ctx, fn)
 }
 
-func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
-	return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
 func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
 	// Trim multi .. exec.
 	cmds = cmds[1 : len(cmds)-1]
@@ -1419,38 +1426,38 @@ func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int
 func (c *ClusterClient) _processTxPipelineNode(
 	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
 ) {
-	_ = node.Client.hooks.processTxPipeline(
-		ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
-			return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return writeCmds(wr, cmds)
-				}); err != nil {
-					setCmdsErr(cmds, err)
-					return err
-				}
-
-				return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
-					statusCmd := cmds[0].(*StatusCmd)
-					// Trim multi and exec.
-					trimmedCmds := cmds[1 : len(cmds)-1]
+	cmds = wrapMultiExec(ctx, cmds)
+	_ = node.Client.hooks.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			}); err != nil {
+				setCmdsErr(cmds, err)
+				return err
+			}
 
-					if err := c.txPipelineReadQueued(
-						ctx, rd, statusCmd, trimmedCmds, failedCmds,
-					); err != nil {
-						setCmdsErr(cmds, err)
+			return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				statusCmd := cmds[0].(*StatusCmd)
+				// Trim multi and exec.
+				trimmedCmds := cmds[1 : len(cmds)-1]
 
-						moved, ask, addr := isMovedError(err)
-						if moved || ask {
-							return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
-						}
+				if err := c.txPipelineReadQueued(
+					ctx, rd, statusCmd, trimmedCmds, failedCmds,
+				); err != nil {
+					setCmdsErr(cmds, err)
 
-						return err
+					moved, ask, addr := isMovedError(err)
+					if moved || ask {
+						return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
 					}
 
-					return pipelineReadCmds(rd, trimmedCmds)
-				})
+					return err
+				}
+
+				return pipelineReadCmds(rd, trimmedCmds)
 			})
 		})
+	})
 }
 
 func (c *ClusterClient) txPipelineReadQueued(
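Note the behavioral shift in `_processTxPipelineNode`: the commands are wrapped with MULTI/EXEC first and then run through `withProcessPipelineHook`, the plain pipeline hook on the node client, instead of a dedicated tx-pipeline hook. Node-level hooks therefore observe the full `MULTI ... EXEC` command slice.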
@@ -1742,7 +1749,7 @@ func (c *ClusterClient) cmdNode(
 	return state.slotMasterNode(slot)
 }
 
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
 	if c.opt.RouteByLatency {
 		return state.slotClosestNode(slot)
 	}