@@ -846,8 +846,8 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 	c.cmdable = c.Process

 	c.hooks.setProcess(c.process)
-	c.hooks.setProcessPipeline(c._processPipeline)
-	c.hooks.setProcessTxPipeline(c._processTxPipeline)
+	c.hooks.setProcessPipeline(c.processPipeline)
+	c.hooks.setProcessTxPipeline(c.processTxPipeline)

 	return c
 }
@@ -1187,7 +1187,7 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
 	return c.Pipeline().Pipelined(ctx, fn)
 }

-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
 	cmdsMap := newCmdsMap()

 	if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
@@ -1210,7 +1210,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 		wg.Add(1)
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
-			c._processPipelineNode(ctx, node, cmds, failedCmds)
+			c.processPipelineNode(ctx, node, cmds, failedCmds)
 		}(node, cmds)
 	}

@@ -1263,22 +1263,38 @@ func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool
 	return true
 }

-func (c *ClusterClient) _processPipelineNode(
+func (c *ClusterClient) processPipelineNode(
 	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
 ) {
 	_ = node.Client.hooks.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
-		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-			if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
-				return writeCmds(wr, cmds)
-			}); err != nil {
-				setCmdsErr(cmds, err)
-				return err
-			}
+		cn, err := node.Client.getConn(ctx)
+		if err != nil {
+			_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+			setCmdsErr(cmds, err)
+			return err
+		}

-			return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
-				return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
-			})
-		})
+		err = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+		node.Client.releaseConn(ctx, cn, err)
+		return err
+	})
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+	ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	}); err != nil {
+		if shouldRetry(err, true) {
+			_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+		}
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
 	})
 }

@@ -1365,7 +1381,7 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error)
 	return c.TxPipeline().Pipelined(ctx, fn)
 }

-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
 	// Trim multi .. exec.
 	cmds = cmds[1 : len(cmds)-1]

@@ -1399,7 +1415,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
 		wg.Add(1)
 		go func(node *clusterNode, cmds []Cmder) {
 			defer wg.Done()
-			c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+			c.processTxPipelineNode(ctx, node, cmds, failedCmds)
 		}(node, cmds)
 	}

@@ -1423,40 +1439,56 @@ func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
 	return cmdsMap
 }

-func (c *ClusterClient) _processTxPipelineNode(
+func (c *ClusterClient) processTxPipelineNode(
 	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
 ) {
 	cmds = wrapMultiExec(ctx, cmds)
 	_ = node.Client.hooks.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
-		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-			if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
-				return writeCmds(wr, cmds)
-			}); err != nil {
-				setCmdsErr(cmds, err)
-				return err
-			}
+		cn, err := node.Client.getConn(ctx)
+		if err != nil {
+			_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+			setCmdsErr(cmds, err)
+			return err
+		}

-			return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
-				statusCmd := cmds[0].(*StatusCmd)
-				// Trim multi and exec.
-				trimmedCmds := cmds[1 : len(cmds)-1]
+		err = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+		node.Client.releaseConn(ctx, cn, err)
+		return err
+	})
+}

-				if err := c.txPipelineReadQueued(
-					ctx, rd, statusCmd, trimmedCmds, failedCmds,
-				); err != nil {
-					setCmdsErr(cmds, err)
+func (c *ClusterClient) processTxPipelineNodeConn(
+	ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	}); err != nil {
+		if shouldRetry(err, true) {
+			_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+		}
+		setCmdsErr(cmds, err)
+		return err
+	}

-					moved, ask, addr := isMovedError(err)
-					if moved || ask {
-						return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
-					}
+	return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		statusCmd := cmds[0].(*StatusCmd)
+		// Trim multi and exec.
+		trimmedCmds := cmds[1 : len(cmds)-1]

-					return err
-				}
+		if err := c.txPipelineReadQueued(
+			ctx, rd, statusCmd, trimmedCmds, failedCmds,
+		); err != nil {
+			setCmdsErr(cmds, err)

-				return pipelineReadCmds(rd, trimmedCmds)
-			})
-		})
+			moved, ask, addr := isMovedError(err)
+			if moved || ask {
+				return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
+			}
+
+			return err
+		}
+
+		return pipelineReadCmds(rd, trimmedCmds)
 	})
 }

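Reviewer note on the pattern above: the change swaps the callback-style node.Client.withConn(...) for an explicit getConn/releaseConn pair, so the per-connection work moves into helpers (processPipelineNodeConn, processTxPipelineNodeConn) and releaseConn can see the helper's error; it also gates re-queuing of failed commands on shouldRetry(err, true), so non-retriable write errors are no longer remapped. Below is a minimal, self-contained Go sketch of that acquire/delegate/release shape; conn, connPool, and process are hypothetical stand-ins for illustration, not go-redis APIs.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// conn and connPool are hypothetical stand-ins for pool.Conn and the
// client's connection pool; they are not go-redis types.
type conn struct{ id int }

type connPool struct{ free []*conn }

// getConn hands out a pooled connection or fails if none is available.
func (p *connPool) getConn(ctx context.Context) (*conn, error) {
	if len(p.free) == 0 {
		return nil, errors.New("pool exhausted")
	}
	cn := p.free[len(p.free)-1]
	p.free = p.free[:len(p.free)-1]
	return cn, nil
}

// releaseConn inspects the error from the work done on the connection:
// a connection that saw an error is discarded instead of being reused.
func (p *connPool) releaseConn(ctx context.Context, cn *conn, err error) {
	if err != nil {
		fmt.Printf("conn %d discarded: %v\n", cn.id, err)
		return
	}
	p.free = append(p.free, cn)
}

// process mirrors the shape of processPipelineNode: acquire the connection
// explicitly, delegate the real work to a helper that receives the raw
// connection, then release with the helper's error.
func process(ctx context.Context, p *connPool, work func(context.Context, *conn) error) error {
	cn, err := p.getConn(ctx)
	if err != nil {
		return err
	}
	err = work(ctx, cn)
	p.releaseConn(ctx, cn, err)
	return err
}

func main() {
	p := &connPool{free: []*conn{{id: 1}}}
	_ = process(context.Background(), p, func(ctx context.Context, cn *conn) error {
		fmt.Printf("using conn %d\n", cn.id)
		return nil
	})
}
```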