@@ -0,0 +1,2 @@ | |||||
.idea | |||||
dist/ |
@@ -0,0 +1,13 @@ | |||||
module rebeam | |||||
go 1.17 | |||||
require ( | |||||
github.com/go-redis/redis/v8 v8.11.4 | |||||
github.com/nsqio/go-diskqueue v1.1.0 | |||||
) | |||||
require ( | |||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect | |||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect | |||||
) |
@@ -0,0 +1,99 @@ | |||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= | |||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= | |||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= | |||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= | |||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | |||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= | |||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= | |||||
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= | |||||
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= | |||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= | |||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | |||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= | |||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= | |||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= | |||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= | |||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= | |||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= | |||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | |||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= | |||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | |||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | |||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | |||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= | |||||
github.com/nsqio/go-diskqueue v1.1.0 h1:r0dJ0DMXT3+2mOq+79cvCjnhoBxyGC2S9O+OjQrpe4Q= | |||||
github.com/nsqio/go-diskqueue v1.1.0/go.mod h1:INuJIxl4ayUsyoNtHL5+9MFPDfSZ0zY93hNY6vhBRsI= | |||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= | |||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= | |||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= | |||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= | |||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= | |||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= | |||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= | |||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= | |||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= | |||||
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= | |||||
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= | |||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | |||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= | |||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | |||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | |||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | |||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | |||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | |||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= | |||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= | |||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= | |||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | |||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= | |||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | |||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= | |||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= | |||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= | |||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= | |||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= | |||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= | |||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= | |||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= | |||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= | |||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= | |||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= | |||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= | |||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= |
@@ -0,0 +1,461 @@ | |||||
package main | |||||
import ( | |||||
"context" | |||||
"encoding/json" | |||||
"fmt" | |||||
"log" | |||||
"net/http" | |||||
_ "net/http/pprof" | |||||
"os" | |||||
"os/signal" | |||||
"strings" | |||||
"syscall" | |||||
"time" | |||||
"github.com/go-redis/redis/v8" | |||||
dq "github.com/nsqio/go-diskqueue" | |||||
) | |||||
// Fallback offload thresholds, used by Do when the corresponding field of a
// project's offload config is unset (zero).
const (
	// NOTE(review): high and low defaults are identical (100000) — confirm
	// that this is intended and not a copy-paste slip.
	DefaultWatermarkHigh int64 = 100000
	DefaultWatermarkLow  int64 = 100000
	DefaultBatchSize     int64 = 10000
)
func l(_ dq.LogLevel, f string, args ...interface{}) { | |||||
log.Printf(f, args...) | |||||
} | |||||
// ProjectRedisConfig is the optional per-project Redis connection override
// carried in a project's JSON config (the "redis" key).
type ProjectRedisConfig struct {
	Host string `json:"host"` // Redis host name or address
	Pass string `json:"pass"` // password; the username used is always "default" (see RefreshProjects)
	Port int    `json:"port"` // Redis TCP port
}
// ProjectOffloadConfig holds a project's offloading thresholds. Zero values
// fall back to the package-level defaults inside Do.
type ProjectOffloadConfig struct {
	WatermarkHigh   int64 `json:"high"`      // offload entries to disk when a set grows beyond this
	WatermarkMiddle int64 `json:"middle"`    // NOTE(review): only compared for change detection; never read by Do — confirm it is still needed
	WatermarkLow    int64 `json:"low"`       // reload entries from disk when a set shrinks below this
	BatchSize       int64 `json:"batchsize"` // maximum entries moved per SPOPN/SADD round trip
}
// ProjectConfig is one project's entry in the Redis "trackers" hash,
// decoded from JSON by RefreshProjects.
type ProjectConfig struct {
	RedisConfig   *ProjectRedisConfig  `json:"redis,omitempty"` // nil means: use the shared main Redis client
	OffloadConfig ProjectOffloadConfig `json:"offload"`         // watermarks and batch size
}
// Offloader moves entries between one project's Redis sets and local disk
// queues so that set sizes stay between the configured watermarks.
type Offloader struct {
	RedisClient   *redis.Client        // project-specific client, or the shared main client when no override is set
	ProjectConfig ProjectConfig        // config snapshot the offloader was started with
	OffloadConfig ProjectOffloadConfig // offload thresholds (zero fields default inside Do)
	Context       context.Context      // canceled to request shutdown
	Cancel        context.CancelFunc   // cancels Context
	Done          chan bool            // closed when Do has fully exited
	Queues        map[string]dq.Interface // open disk queues, keyed by queue name
	Sets          map[string]string       // Redis set name -> backing queue name
	Name          string                  // project name; used as the key prefix in Redis
}
func (that *Offloader) CleanName(s string) string { | |||||
return strings.ReplaceAll(strings.ReplaceAll(s, "/", "_"), "\x00", "_") | |||||
} | |||||
func (that *Offloader) RedisConfigDiffers(new *ProjectRedisConfig) bool { | |||||
if that.ProjectConfig.RedisConfig == nil && new == nil { | |||||
return false | |||||
} | |||||
if that.ProjectConfig.RedisConfig == nil || new == nil || that.ProjectConfig.RedisConfig.Host != new.Host || that.ProjectConfig.RedisConfig.Port != new.Port || that.ProjectConfig.RedisConfig.Pass != new.Pass { | |||||
return true | |||||
} | |||||
return false | |||||
} | |||||
func (that *Offloader) OffloadConfigDiffers(new ProjectOffloadConfig) bool { | |||||
return that.OffloadConfig.WatermarkHigh != new.WatermarkHigh || that.OffloadConfig.WatermarkMiddle != new.WatermarkMiddle || that.OffloadConfig.WatermarkLow != new.WatermarkLow || that.OffloadConfig.BatchSize != new.BatchSize | |||||
} | |||||
// RefreshQueues rebuilds the set->queue mapping for the project from the
// priorities and filters currently stored in Redis, opening disk queues that
// are newly needed and closing (and dropping) queues that no longer back any
// set. It logs and returns early on any Redis error, leaving the previous
// mapping in place.
func (that *Offloader) RefreshQueues() {
	// Fetch priorities and filters in a single pipelined round trip.
	pipe := that.RedisClient.Pipeline()
	prioritiesCmdRes := pipe.ZRange(that.Context, fmt.Sprintf("%s:priorities", that.Name), 0, -1)
	filtersCmdRes := pipe.SMembers(that.Context, fmt.Sprintf("%s:filters", that.Name))
	_, err := pipe.Exec(that.Context)
	if err != nil {
		log.Printf("unable to refresh queues for offloader %s: %s", that.Name, err)
		return
	}
	priorities, err := prioritiesCmdRes.Result()
	if err != nil {
		log.Printf("unable to refresh queues for offloader %s: %s", that.Name, err)
		return
	}
	filters, err := filtersCmdRes.Result()
	if err != nil {
		log.Printf("unable to refresh queues for offloader %s: %s", that.Name, err)
		return
	}
	// Fixed sets each back a queue of the same name.
	setQueueMap := map[string]string{
		"todo": "todo",
		"todo:secondary": "todo:secondary",
		"todo:redo": "todo:redo",
		"done": "done",
		"unretrievable": "unretrievable",
	}
	// All filtered:* sets share the single "filtered" queue; each priority
	// set gets its own queue.
	for _, filter := range filters {
		setQueueMap[fmt.Sprintf("filtered:%s", filter)] = "filtered"
	}
	for _, priority := range priorities {
		setQueueMap[fmt.Sprintf("todo:prio:%s", priority)] = fmt.Sprintf("todo:prio:%s", priority)
	}
	// Open any queue that is now required but not yet open.
	needQueueMap := map[string]bool{}
	for setName, queueName := range setQueueMap {
		needQueueMap[queueName] = true
		if _, has := that.Queues[queueName]; !has {
			log.Printf("opening queue %s for %s:%s", queueName, that.Name, setName)
			// 128 MiB per file, up to 1,000,000 entries buffered, 5 s sync
			// interval; files live under the global dataDir.
			that.Queues[queueName] = dq.New(fmt.Sprintf("%s:%s", that.Name, that.CleanName(queueName)), dataDir, 128*1024*1024, 0, 128*1024*1024, 1_000_000, 5*time.Second, l)
		}
		that.Sets[setName] = queueName
	}
	// Close queues that no current set needs. (Deleting from a map while
	// ranging over it is safe in Go.)
	for k, v := range that.Queues {
		if _, has := needQueueMap[k]; !has {
			v.Close()
			delete(that.Queues, k)
		}
	}
}
func (that *Offloader) CloseQueues() { | |||||
for k, q := range that.Queues { | |||||
log.Printf("closing queue %s for %s", k, that.Name) | |||||
q.Close() | |||||
} | |||||
} | |||||
func (that *Offloader) UpdateStats() { | |||||
hset := map[string]interface{}{} | |||||
for k, q := range that.Sets { | |||||
if k != q { | |||||
continue | |||||
} | |||||
hset[k] = fmt.Sprintf("%d", that.Queues[q].Depth()) | |||||
} | |||||
_, err := that.RedisClient.HSet(that.Context, fmt.Sprintf("%s:offloaded", that.Name), hset).Result() | |||||
if err != nil { | |||||
log.Printf("unable to hmset %s:offloaded: %s", that.Name, err) | |||||
} | |||||
} | |||||
// Do runs the offloader's main loop until its Context is canceled. Each
// iteration it measures the size of every tracked Redis set and either
// offloads excess entries to the local disk queue (above the high watermark)
// or reloads entries back into Redis (below the low watermark). The set/queue
// mapping is refreshed every 5 minutes and stats are pushed at least once per
// second. Done is closed when the loop exits.
func (that *Offloader) Do() {
	defer close(that.Done)
	defer that.Cancel()
	if that.ProjectConfig.RedisConfig != nil {
		// This client was created specifically for this offloader in
		// RefreshProjects, so it is closed here; the shared main client is
		// left open.
		defer that.RedisClient.Close()
	}
	that.Sets = map[string]string{}
	that.Queues = map[string]dq.Interface{}
	defer that.CloseQueues()
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	refreshTicker := time.NewTicker(5 * time.Minute)
	defer refreshTicker.Stop()
	that.RefreshQueues()
	that.UpdateStats()
	// Buffered (capacity 1) so that one pending "more work remains" signal
	// can be queued without blocking the sender.
	skipSleepChan := make(chan bool, 1)
	defer close(skipSleepChan)
	// Zero-valued config fields fall back to the package defaults.
	watermarkHigh := that.OffloadConfig.WatermarkHigh
	if watermarkHigh == 0 {
		watermarkHigh = DefaultWatermarkHigh
	}
	watermarkLow := that.OffloadConfig.WatermarkLow
	if watermarkLow == 0 {
		watermarkLow = DefaultWatermarkLow
	}
	batchSize := that.OffloadConfig.BatchSize
	if batchSize == 0 {
		batchSize = DefaultBatchSize
	}
	for {
		// NOTE(review): the commented-out block below is an older, per-queue
		// (non-pipelined) version of the loop body kept for reference;
		// consider deleting it.
		//for k, q := range that.Queues {
		//	key := fmt.Sprintf("%s:%s", that.Name, k)
		//	scard, err := that.RedisClient.SCard(that.Context, key).Result()
		//	if err != nil {
		//		log.Printf("unable to scard %s: %s", key, err)
		//		continue
		//	}
		//	for scard > watermarkHigh || scard < watermarkLow {
		//		select {
		//		case <-that.Context.Done():
		//			return
		//		case <-refreshTicker.C:
		//			that.RefreshQueues()
		//			that.UpdateStats()
		//		default:
		//		}
		//		if scard > watermarkHigh {
		//			spopLimit := scard - watermarkHigh
		//			if spopLimit > batchSize {
		//				spopLimit = batchSize
		//			}
		//			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
		//			entries, err := that.RedisClient.SPopN(ctx, key, spopLimit).Result()
		//			cancel()
		//			if err != nil {
		//				log.Printf("unable to spop %s: %s", key, err)
		//			}
		//			scard = scard - int64(len(entries))
		//			for _, entry := range entries {
		//				err := q.Put([]byte(entry))
		//				if err != nil {
		//					log.Printf("unable to q.Put %s: %s", key, err)
		//					return
		//				}
		//			}
		//		} else if scard < watermarkLow {
		//			spopLimit := watermarkLow - scard
		//			if spopLimit > batchSize {
		//				spopLimit = batchSize
		//			}
		//			var entries []interface{}
		//			for q.Depth() > 0 && int64(len(entries)) < spopLimit {
		//				entry := <-q.ReadChan()
		//				entries = append(entries, string(entry))
		//			}
		//			if len(entries) == 0 {
		//				break
		//			}
		//			ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
		//			_, err := that.RedisClient.SAdd(ctx, key, entries...).Result()
		//			cancel()
		//			if err != nil {
		//				log.Printf("unable to sadd %s %#v: %s", key, entries, err)
		//				for _, entry := range entries {
		//					err := q.Put([]byte(entry.(string)))
		//					if err != nil {
		//						log.Printf("unable to q.Put %s: %s", key, err)
		//					}
		//				}
		//				return
		//			}
		//			scard = scard + int64(len(entries))
		//		}
		//	}
		//}
		// Measure the cardinality of every tracked set in one pipelined
		// round trip.
		scards := map[string]*redis.IntCmd{}
		pipe := that.RedisClient.Pipeline()
		for k := range that.Sets {
			key := fmt.Sprintf("%s:%s", that.Name, k)
			scards[k] = pipe.SCard(that.Context, key)
		}
		_, err := pipe.Exec(that.Context)
		if err != nil {
			log.Printf("unable to scard %s: %s", that.Name, err)
		} else {
			rerun := false
			for k, q := range that.Sets {
				key := fmt.Sprintf("%s:%s", that.Name, k)
				scard, err := scards[k].Result()
				if err != nil {
					log.Printf("unable to scard %s: %s", key, err)
					continue
				}
				if scard > watermarkHigh {
					// Too many entries in Redis: pop a batch and spill it to
					// the backing disk queue.
					spopLimit := scard - watermarkHigh
					if spopLimit > batchSize {
						spopLimit = batchSize
					}
					ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
					entries, err := that.RedisClient.SPopN(ctx, key, spopLimit).Result()
					cancel()
					if err != nil {
						log.Printf("unable to spop %s: %s", key, err)
					}
					if len(entries) == 0 {
						continue
					}
					for _, entry := range entries {
						err := that.Queues[q].Put([]byte(entry))
						if err != nil {
							// A failed disk write is fatal for this
							// offloader; the popped entries already written
							// are preserved in the queue.
							log.Printf("unable to q.Put %s: %s", key, err)
							return
						}
					}
					rerun = true
				} else if k == q && scard < watermarkLow && that.Queues[q].Depth() > 0 {
					// Set is low and its own queue has entries: move a batch
					// back into Redis. Only sets that own their queue
					// (k == q) are refilled; aliased sets (filtered:*) are
					// drain-only.
					spopLimit := watermarkLow - scard
					if spopLimit > batchSize {
						spopLimit = batchSize
					}
					var entries []interface{}
					for that.Queues[q].Depth() > 0 && int64(len(entries)) < spopLimit {
						entry := <-that.Queues[q].ReadChan()
						entries = append(entries, string(entry))
					}
					if len(entries) == 0 {
						continue
					}
					ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
					_, err := that.RedisClient.SAdd(ctx, key, entries...).Result()
					cancel()
					if err != nil {
						// Push what we read from disk back into the queue so
						// nothing is lost, then exit.
						log.Printf("unable to sadd %s: %s", key, err)
						for _, entry := range entries {
							err := that.Queues[q].Put([]byte(entry.(string)))
							if err != nil {
								log.Printf("unable to q.Put %s: %s", key, err)
							}
						}
						return
					}
					rerun = true
				}
			}
			if rerun {
				// Work may remain; queue a signal to skip the sleep below.
				select {
				case skipSleepChan <- true:
				default:
				}
			}
			that.UpdateStats()
		}
		// Sleep until cancellation, the next tick, the 5-minute refresh, or
		// a pending skip-sleep signal.
		select {
		case <-that.Context.Done():
			return
		case <-refreshTicker.C:
			that.RefreshQueues()
			that.UpdateStats()
		case <-ticker.C:
			that.UpdateStats()
		case <-skipSleepChan:
		}
	}
}
var offloaders = map[string]*Offloader{} | |||||
func StopProjects() { | |||||
var doneChans []chan bool | |||||
for project, offloader := range offloaders { | |||||
log.Printf("stopping offloader %s", project) | |||||
offloader.Cancel() | |||||
doneChans = append(doneChans, offloader.Done) | |||||
} | |||||
for _, c := range doneChans { | |||||
<-c | |||||
} | |||||
} | |||||
func RefreshProjects(redisClient *redis.Client) { | |||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) | |||||
res, err := redisClient.HGetAll(ctx, "trackers").Result() | |||||
cancel() | |||||
if err != nil { | |||||
log.Printf("unable to refresh trackers table: %s", err) | |||||
return | |||||
} | |||||
updatedProjects := map[string]ProjectConfig{} | |||||
for project, configString := range res { | |||||
//if project != "ua" && project != "ukr-net" && project != "ua-urls" { | |||||
// continue | |||||
//} | |||||
config := ProjectConfig{} | |||||
err := json.Unmarshal([]byte(configString), &config) | |||||
if err != nil { | |||||
log.Printf("unable to decode project %s config: %s", project, err) | |||||
continue | |||||
} | |||||
updatedProjects[project] = config | |||||
} | |||||
for project, offloader := range offloaders { | |||||
_, stopRequired := updatedProjects[project] | |||||
stopRequired = !stopRequired | |||||
if !stopRequired { | |||||
stopRequired = offloader.OffloadConfigDiffers(updatedProjects[project].OffloadConfig) | |||||
if !stopRequired { | |||||
stopRequired = offloader.RedisConfigDiffers(updatedProjects[project].RedisConfig) | |||||
if !stopRequired { | |||||
select { | |||||
case <-offloader.Context.Done(): | |||||
stopRequired = true | |||||
case <-offloader.Done: | |||||
stopRequired = true | |||||
default: | |||||
} | |||||
} | |||||
} | |||||
} | |||||
if stopRequired { | |||||
log.Printf("stopping offloader %s", project) | |||||
offloader.Cancel() | |||||
<-offloader.Done | |||||
delete(offloaders, project) | |||||
} | |||||
} | |||||
for project, config := range updatedProjects { | |||||
if _, has := offloaders[project]; !has { | |||||
log.Printf("starting offloader %s", project) | |||||
offloader := &Offloader{} | |||||
offloader.Name = project | |||||
offloader.ProjectConfig = config | |||||
if config.RedisConfig != nil { | |||||
offloader.RedisClient = redis.NewClient(&redis.Options{ | |||||
Addr: fmt.Sprintf("%s:%d", config.RedisConfig.Host, config.RedisConfig.Port), | |||||
Username: "default", | |||||
Password: config.RedisConfig.Pass, | |||||
ReadTimeout: 15 * time.Minute, | |||||
}) | |||||
} else { | |||||
offloader.RedisClient = redisClient | |||||
} | |||||
offloader.Context, offloader.Cancel = context.WithCancel(context.Background()) | |||||
offloader.Done = make(chan bool) | |||||
offloaders[project] = offloader | |||||
go offloader.Do() | |||||
} | |||||
} | |||||
} | |||||
var dataDir string | |||||
func main() { | |||||
log.SetFlags(log.Flags() | log.Lshortfile) | |||||
go func() { | |||||
if err := http.ListenAndServe("127.0.0.1:16992", nil); err != nil { | |||||
log.Printf("webserver error: %s", err) | |||||
} | |||||
}() | |||||
dataDir = os.Getenv("DATA_DIR") | |||||
if dataDir == "" { | |||||
log.Panicf("no DATA_DIR specified") | |||||
} | |||||
mainOptions, err := redis.ParseURL(os.Getenv("REDIS_URL")) | |||||
if err != nil { | |||||
log.Panicf("%s", err) | |||||
} | |||||
mainOptions.ReadTimeout = 15 * time.Minute | |||||
mainClient := redis.NewClient(mainOptions) | |||||
sc := make(chan os.Signal, 1) | |||||
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) | |||||
ticker := time.NewTicker(1 * time.Minute) | |||||
for { | |||||
RefreshProjects(mainClient) | |||||
select { | |||||
case <-sc: | |||||
StopProjects() | |||||
return | |||||
case <-ticker.C: | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,22 @@ | |||||
Copyright (c) 2016 Caleb Spare | |||||
MIT License | |||||
Permission is hereby granted, free of charge, to any person obtaining | |||||
a copy of this software and associated documentation files (the | |||||
"Software"), to deal in the Software without restriction, including | |||||
without limitation the rights to use, copy, modify, merge, publish, | |||||
distribute, sublicense, and/or sell copies of the Software, and to | |||||
permit persons to whom the Software is furnished to do so, subject to | |||||
the following conditions: | |||||
The above copyright notice and this permission notice shall be | |||||
included in all copies or substantial portions of the Software. | |||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | |||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
@@ -0,0 +1,69 @@ | |||||
# xxhash | |||||
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) | |||||
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) | |||||
xxhash is a Go implementation of the 64-bit | |||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a | |||||
high-quality hashing algorithm that is much faster than anything in the Go | |||||
standard library. | |||||
This package provides a straightforward API: | |||||
``` | |||||
func Sum64(b []byte) uint64 | |||||
func Sum64String(s string) uint64 | |||||
type Digest struct{ ... } | |||||
func New() *Digest | |||||
``` | |||||
The `Digest` type implements hash.Hash64. Its key methods are: | |||||
``` | |||||
func (*Digest) Write([]byte) (int, error) | |||||
func (*Digest) WriteString(string) (int, error) | |||||
func (*Digest) Sum64() uint64 | |||||
``` | |||||
This implementation provides a fast pure-Go implementation and an even faster | |||||
assembly implementation for amd64. | |||||
## Compatibility | |||||
This package is in a module and the latest code is in version 2 of the module. | |||||
You need a version of Go with at least "minimal module compatibility" to use | |||||
github.com/cespare/xxhash/v2: | |||||
* 1.9.7+ for Go 1.9 | |||||
* 1.10.3+ for Go 1.10 | |||||
* Go 1.11 or later | |||||
I recommend using the latest release of Go. | |||||
## Benchmarks | |||||
Here are some quick benchmarks comparing the pure-Go and assembly | |||||
implementations of Sum64. | |||||
| input size | purego | asm | | |||||
| --- | --- | --- | | |||||
| 5 B | 979.66 MB/s | 1291.17 MB/s | | |||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s | | |||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s | | |||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s | | |||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using | |||||
the following commands under Go 1.11.2: | |||||
``` | |||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' | |||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes' | |||||
``` | |||||
## Projects using this package | |||||
- [InfluxDB](https://github.com/influxdata/influxdb) | |||||
- [Prometheus](https://github.com/prometheus/prometheus) | |||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) | |||||
- [FreeCache](https://github.com/coocood/freecache) | |||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache) |
@@ -0,0 +1,235 @@ | |||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described | |||||
// at http://cyan4973.github.io/xxHash/. | |||||
package xxhash | |||||
import ( | |||||
"encoding/binary" | |||||
"errors" | |||||
"math/bits" | |||||
) | |||||
// The five prime constants of the XXH64 algorithm (from the xxHash
// reference implementation).
const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	// Variable copies of the primes above (see NOTE).
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)
// Digest implements hash.Hash64.
//
// v1..v4 are the four running accumulators updated for every full 32-byte
// block; mem buffers input until a complete block is available.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64   // total bytes written so far
	mem   [32]byte // buffered partial block
	n     int      // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm. | |||||
func New() *Digest { | |||||
var d Digest | |||||
d.Reset() | |||||
return &d | |||||
} | |||||
// Reset clears the Digest's state so that it can be reused.
// The accumulator seeds are the XXH64 initial values for seed 0; the prime
// vars (not consts) are used to avoid constant-arithmetic overflow.
func (d *Digest) Reset() {
	d.v1 = prime1v + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.total = 0
	d.n = 0
}
// Size always returns 8 bytes, the width of an XXH64 hash.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes, the XXH64 internal block size.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
// Input is buffered in d.mem until a full 32-byte block is available; full
// blocks are folded into the accumulators via round/writeBlocks (defined
// elsewhere in the package).
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)
	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		d.n += n
		return
	}
	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		d.n = 0
	}
	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}
	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)
	return
}
// Sum appends the current hash to b and returns the resulting slice. | |||||
func (d *Digest) Sum(b []byte) []byte { | |||||
s := d.Sum64() | |||||
return append( | |||||
b, | |||||
byte(s>>56), | |||||
byte(s>>48), | |||||
byte(s>>40), | |||||
byte(s>>32), | |||||
byte(s>>24), | |||||
byte(s>>16), | |||||
byte(s>>8), | |||||
byte(s), | |||||
) | |||||
} | |||||
// Sum64 returns the current hash.
//
// It only reads the Digest's state, so more data may be written
// afterwards.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		// At least one full block was absorbed: combine the rotated
		// accumulators, then merge each lane in.
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Small input: v3 still holds its Reset value (0), so this is
		// the seed + prime5 of the reference algorithm.
		h = d.v3 + prime5
	}

	h += d.total

	// Fold in the buffered tail: 8 bytes, then 4, then 1 at a time.
	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	// Final avalanche.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
const (
	// magic identifies marshaled Digest state (see MarshalBinary).
	magic = "xxh\x06"
	// marshaledSize = header + five uint64 state words (v1-v4, total)
	// + the full 32-byte mem buffer.
	marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
//
// The encoding is: the magic header, v1-v4 and total as little-endian
// uint64s, then the 32-byte mem buffer (only the first n bytes of
// which are meaningful).
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	// Pad out to the fixed marshaledSize. The extra bytes are zeros
	// from the zero-initialized backing array allocated above.
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It accepts only data of the exact size and format produced by
// MarshalBinary.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	// n is not stored explicitly: mem only ever holds the tail of an
	// incomplete 32-byte block, so it is recovered as total mod 32.
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
// appendUint64 appends x to b in little-endian byte order and returns
// the extended slice.
func appendUint64(b []byte, x uint64) []byte {
	var scratch [8]byte
	binary.LittleEndian.PutUint64(scratch[:], x)
	return append(b, scratch[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) { | |||||
x := u64(b) | |||||
return b[8:], x | |||||
} | |||||
// u64 loads the first 8 bytes of b as a little-endian uint64.
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }

// u32 loads the first 4 bytes of b as a little-endian uint32.
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 { | |||||
acc += input * prime2 | |||||
acc = rol31(acc) | |||||
acc *= prime1 | |||||
return acc | |||||
} | |||||
func mergeRound(acc, val uint64) uint64 { | |||||
val = round(0, val) | |||||
acc ^= val | |||||
acc = acc*prime1 + prime4 | |||||
return acc | |||||
} | |||||
// The rolN helpers are thin named wrappers around bits.RotateLeft64
// with the fixed rotation amounts used by the algorithm.
func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
@@ -0,0 +1,13 @@ | |||||
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
// The body is provided by the accompanying assembly implementation.
//
//go:noescape
func Sum64(b []byte) uint64

// writeBlocks folds all whole 32-byte blocks of b into d's
// accumulators and reports how many bytes were consumed.
// The body is provided by the accompanying assembly implementation.
//
//go:noescape
func writeBlocks(d *Digest, b []byte) int
@@ -0,0 +1,215 @@ | |||||
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX	h
// SI	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// DI	prime4v

// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (SI), R12 \
	ADDQ  $8, SI    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  DI, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), DI

	// Load slice.
	MOVQ b_base+0(FP), SI
	MOVQ b_len+8(FP), DX
	LEAQ (SI)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX

	// Check whether we have at least one block.
	CMPQ DX, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4):
	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1
	// (the same values Digest.Reset uses).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until SI > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	JLE  blockLoop

	// Combine the rotated accumulators, then merge each lane in.
	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)

	JMP afterBlocks

noBlocks:
	// Small input: h starts as seed (0) + prime5.
	MOVQ ·prime5v(SB), AX

afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
	ADDQ $24, BX

	CMPQ SI, BX
	JG   fourByte

	// Fold in the tail 8 bytes at a time.
wordLoop:
	// Calculate k1.
	MOVQ  (SI), R8
	ADDQ  $8, SI
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  DI, AX

	CMPQ SI, BX
	JLE  wordLoop

fourByte:
	// Loop limit becomes len(b)-4; fold in one 4-byte word if present.
	ADDQ $4, BX
	CMPQ SI, BX
	JG   singles

	MOVL  (SI), R8
	ADDQ  $4, SI
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	// Loop limit becomes len(b); fold in the remaining bytes one at a time.
	ADDQ $4, BX
	CMPQ SI, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (SI), R12
	ADDQ    $1, SI
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ SI, BX
	JL   singlesLoop

finalize:
	// Final avalanche: h ^= h>>33; h *= prime2; h ^= h>>29;
	// h *= prime3; h ^= h>>32.
	MOVQ AX, R12
	SHRQ $33, R12
	XORQ R12, AX
	IMULQ R14, AX
	MOVQ AX, R12
	SHRQ $29, R12
	XORQ R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ AX, R12
	SHRQ $32, R12
	XORQ R12, AX

	MOVQ AX, ret+24(FP)
	RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), SI
	MOVQ b_len+16(FP), DX
	LEAQ (SI)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d. The accumulators live at byte offsets 0, 8, 16,
	// and 24 of the Digest struct; this must match its field layout.
	MOVQ d+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	JLE  blockLoop

	// Copy vN back to d.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is SI minus the old base pointer.
	SUBQ b_base+8(FP), SI
	MOVQ SI, ret+32(FP)

	RET
@@ -0,0 +1,76 @@ | |||||
// +build !amd64 appengine !gc purego | |||||
package xxhash | |||||
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Same initial accumulator values as Digest.Reset.
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		// NOTE(review): the three-index slices look like they exist
		// for bounds-check elimination — confirm before changing.
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Small input: seed (0) + prime5.
		h = prime5
	}

	h += uint64(n)

	// Fold in the remaining 0-31 bytes: 8, then 4, then 1 at a time.
	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	// Final avalanche.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
func writeBlocks(d *Digest, b []byte) int { | |||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 | |||||
n := len(b) | |||||
for len(b) >= 32 { | |||||
v1 = round(v1, u64(b[0:8:len(b)])) | |||||
v2 = round(v2, u64(b[8:16:len(b)])) | |||||
v3 = round(v3, u64(b[16:24:len(b)])) | |||||
v4 = round(v4, u64(b[24:32:len(b)])) | |||||
b = b[32:len(b):len(b)] | |||||
} | |||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 | |||||
return n - len(b) | |||||
} |
@@ -0,0 +1,15 @@ | |||||
// +build appengine | |||||
// This file contains the safe implementations of otherwise unsafe-using code. | |||||
package xxhash | |||||
// Sum64String computes the 64-bit xxHash digest of s. | |||||
func Sum64String(s string) uint64 { | |||||
return Sum64([]byte(s)) | |||||
} | |||||
// WriteString adds more data to d. It always returns len(s), nil. | |||||
func (d *Digest) WriteString(s string) (n int, err error) { | |||||
return d.Write([]byte(s)) | |||||
} |
@@ -0,0 +1,57 @@ | |||||
// +build !appengine | |||||
// This file encapsulates usage of unsafe. | |||||
// xxhash_safe.go contains the safe implementations. | |||||
package xxhash | |||||
import ( | |||||
"unsafe" | |||||
) | |||||
// In the future it's possible that compiler optimizations will make these | |||||
// XxxString functions unnecessary by realizing that calls such as | |||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. | |||||
// If that happens, even if we keep these functions they can be replaced with | |||||
// the trivial safe code. | |||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: | |||||
// | |||||
// var b []byte | |||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) | |||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data | |||||
// bh.Len = len(s) | |||||
// bh.Cap = len(s) | |||||
// | |||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough | |||||
// weight to this sequence of expressions that any function that uses it will | |||||
// not be inlined. Instead, the functions below use a different unsafe | |||||
// conversion designed to minimize the inliner weight and allow both to be | |||||
// inlined. There is also a test (TestInlining) which verifies that these are | |||||
// inlined. | |||||
// | |||||
// See https://github.com/golang/go/issues/42739 for discussion. | |||||
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	// Reinterpret s's data pointer and length as a []byte without
	// copying. The exact shape of this expression is chosen to keep
	// the function inlinable; see the comment block above.
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string // supplies the data pointer and length words
	cap int    // slice capacity, set to len(s) by callers
}
@@ -0,0 +1,21 @@ | |||||
The MIT License (MIT) | |||||
Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com> | |||||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||||
of this software and associated documentation files (the "Software"), to deal | |||||
in the Software without restriction, including without limitation the rights | |||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||||
copies of the Software, and to permit persons to whom the Software is | |||||
furnished to do so, subject to the following conditions: | |||||
The above copyright notice and this permission notice shall be included in | |||||
all copies or substantial portions of the Software. | |||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||||
THE SOFTWARE. |
@@ -0,0 +1,79 @@ | |||||
package rendezvous | |||||
// Rendezvous implements rendezvous (highest-random-weight) hashing:
// Lookup returns the node whose combined key/node score is highest.
type Rendezvous struct {
	nodes map[string]int // node name -> index into nstr/nhash
	nstr  []string       // node names, indexed by node index
	nhash []uint64       // hash(name) cached per node, parallel to nstr
	hash  Hasher
}

// Hasher maps a string to a uint64 hash value.
type Hasher func(s string) uint64
func New(nodes []string, hash Hasher) *Rendezvous { | |||||
r := &Rendezvous{ | |||||
nodes: make(map[string]int, len(nodes)), | |||||
nstr: make([]string, len(nodes)), | |||||
nhash: make([]uint64, len(nodes)), | |||||
hash: hash, | |||||
} | |||||
for i, n := range nodes { | |||||
r.nodes[n] = i | |||||
r.nstr[i] = n | |||||
r.nhash[i] = hash(n) | |||||
} | |||||
return r | |||||
} | |||||
func (r *Rendezvous) Lookup(k string) string { | |||||
// short-circuit if we're empty | |||||
if len(r.nodes) == 0 { | |||||
return "" | |||||
} | |||||
khash := r.hash(k) | |||||
var midx int | |||||
var mhash = xorshiftMult64(khash ^ r.nhash[0]) | |||||
for i, nhash := range r.nhash[1:] { | |||||
if h := xorshiftMult64(khash ^ nhash); h > mhash { | |||||
midx = i + 1 | |||||
mhash = h | |||||
} | |||||
} | |||||
return r.nstr[midx] | |||||
} | |||||
func (r *Rendezvous) Add(node string) { | |||||
r.nodes[node] = len(r.nstr) | |||||
r.nstr = append(r.nstr, node) | |||||
r.nhash = append(r.nhash, r.hash(node)) | |||||
} | |||||
func (r *Rendezvous) Remove(node string) { | |||||
// find index of node to remove | |||||
nidx := r.nodes[node] | |||||
// remove from the slices | |||||
l := len(r.nstr) | |||||
r.nstr[nidx] = r.nstr[l] | |||||
r.nstr = r.nstr[:l] | |||||
r.nhash[nidx] = r.nhash[l] | |||||
r.nhash = r.nhash[:l] | |||||
// update the map | |||||
delete(r.nodes, node) | |||||
moved := r.nstr[nidx] | |||||
r.nodes[moved] = nidx | |||||
} | |||||
// xorshiftMult64 is one step of the xorshift64* generator: three
// xorshift operations followed by multiplication with a fixed odd
// constant to diffuse the bits.
func xorshiftMult64(x uint64) uint64 {
	y := x
	y ^= y >> 12 // a
	y ^= y << 25 // b
	y ^= y >> 27 // c
	return y * 2685821657736338717
}
@@ -0,0 +1,3 @@ | |||||
*.rdb | |||||
testdata/*/ | |||||
.idea/ |
@@ -0,0 +1,27 @@ | |||||
run: | |||||
concurrency: 8 | |||||
deadline: 5m | |||||
tests: false | |||||
linters: | |||||
enable-all: true | |||||
disable: | |||||
- funlen | |||||
- gochecknoglobals | |||||
- gochecknoinits | |||||
- gocognit | |||||
- goconst | |||||
- godox | |||||
- gosec | |||||
- maligned | |||||
- wsl | |||||
- gomnd | |||||
- goerr113 | |||||
- exhaustive | |||||
- nestif | |||||
- nlreturn | |||||
- exhaustivestruct | |||||
- wrapcheck | |||||
- errorlint | |||||
- cyclop | |||||
- forcetypeassert | |||||
- forbidigo |
@@ -0,0 +1,4 @@ | |||||
semi: false | |||||
singleQuote: true | |||||
proseWrap: always | |||||
printWidth: 100 |
@@ -0,0 +1,149 @@ | |||||
## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) | |||||
### Features | |||||
* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) | |||||
* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) | |||||
* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) | |||||
## v8.11 | |||||
- Remove OpenTelemetry metrics. | |||||
- Supports more redis commands and options. | |||||
## v8.10 | |||||
- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a | |||||
single span with a Redis command (instead of 4 spans). There are multiple reasons behind this | |||||
decision: | |||||
- Traces become smaller and less noisy. | |||||
- It may be costly to process those 3 extra spans for each query. | |||||
- go-redis no longer depends on OpenTelemetry. | |||||
Eventually we hope to replace the information that we no longer collect with OpenTelemetry | |||||
Metrics. | |||||
## v8.9 | |||||
- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, | |||||
`WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. | |||||
## v8.8 | |||||
- To make updating easier, extra modules now have the same version as go-redis does. That means that | |||||
you need to update your imports: | |||||
``` | |||||
github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 | |||||
github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 | |||||
``` | |||||
## v8.5 | |||||
- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a | |||||
struct: | |||||
```go | |||||
err := rdb.HGetAll(ctx, "hash").Scan(&data) | |||||
err := rdb.MGet(ctx, "key1", "key2").Scan(&data) | |||||
``` | |||||
- Please check [redismock](https://github.com/go-redis/redismock) by | |||||
[monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. | |||||
## v8 | |||||
- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not | |||||
using `context.Context` yet, the simplest option is to define global package variable | |||||
`var ctx = context.TODO()` and use it when `ctx` is required. | |||||
- Full support for `context.Context` canceling. | |||||
- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. | |||||
- Added `redisext.OpenTelemetryHook` that adds
[Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). | |||||
- Redis slow log support. | |||||
- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move | |||||
existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: | |||||
```go | |||||
import "github.com/golang/groupcache/consistenthash" | |||||
ring := redis.NewRing(&redis.RingOptions{ | |||||
NewConsistentHash: func() { | |||||
return consistenthash.New(100, crc32.ChecksumIEEE) | |||||
}, | |||||
}) | |||||
``` | |||||
- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. | |||||
- `Options.MaxRetries` default value is changed from 0 to 3. | |||||
- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. | |||||
## v7.3 | |||||
- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection | |||||
URL contains username. | |||||
## v7.2 | |||||
- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. | |||||
## v7.1 | |||||
- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` | |||||
interface. | |||||
## v7 | |||||
- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a | |||||
transactional pipeline. | |||||
- WrapProcess is replaced with more convenient AddHook that has access to context.Context. | |||||
- WithContext now can not be used to create a shallow copy of the client. | |||||
- New methods ProcessContext, DoContext, and ExecContext. | |||||
- Client respects Context.Deadline when setting net.Conn deadline. | |||||
- Client listens on Context.Done while waiting for a connection from the pool and returns an error | |||||
when the context is cancelled.
- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow | |||||
detecting reconnections. | |||||
- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse | |||||
the time. | |||||
- `SetLimiter` is removed and added `Options.Limiter` instead. | |||||
- `HMSet` is deprecated as of Redis v4. | |||||
## v6.15 | |||||
- Cluster and Ring pipelines process commands for each node in its own goroutine. | |||||
## 6.14 | |||||
- Added Options.MinIdleConns. | |||||
- Added Options.MaxConnAge. | |||||
- PoolStats.FreeConns is renamed to PoolStats.IdleConns. | |||||
- Add Client.Do to simplify creating custom commands. | |||||
- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. | |||||
- Lower memory usage. | |||||
## v6.13 | |||||
- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set | |||||
`HashReplicas = 1000` for better keys distribution between shards. | |||||
- Cluster client was optimized to use much less memory when reloading cluster state. | |||||
- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout | |||||
occurs. In most cases it is recommended to use PubSub.Channel instead.
- Dialer.KeepAlive is set to 5 minutes by default. | |||||
## v6.12 | |||||
- ClusterClient got new option called `ClusterSlots` which allows building a cluster of normal Redis
Servers that don't have cluster mode enabled. See | |||||
https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup |
@@ -0,0 +1,25 @@ | |||||
Copyright (c) 2013 The github.com/go-redis/redis Authors. | |||||
All rights reserved. | |||||
Redistribution and use in source and binary forms, with or without | |||||
modification, are permitted provided that the following conditions are | |||||
met: | |||||
* Redistributions of source code must retain the above copyright | |||||
notice, this list of conditions and the following disclaimer. | |||||
* Redistributions in binary form must reproduce the above | |||||
copyright notice, this list of conditions and the following disclaimer | |||||
in the documentation and/or other materials provided with the | |||||
distribution. | |||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@@ -0,0 +1,35 @@ | |||||
PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) | |||||
test: testdeps | |||||
go test ./... | |||||
go test ./... -short -race | |||||
go test ./... -run=NONE -bench=. -benchmem | |||||
env GOOS=linux GOARCH=386 go test ./... | |||||
go vet | |||||
testdeps: testdata/redis/src/redis-server | |||||
bench: testdeps | |||||
go test ./... -test.run=NONE -test.bench=. -test.benchmem | |||||
.PHONY: all test testdeps bench | |||||
testdata/redis: | |||||
mkdir -p $@ | |||||
wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ | |||||
testdata/redis/src/redis-server: testdata/redis | |||||
cd $< && make all | |||||
fmt: | |||||
gofmt -w -s ./ | |||||
goimports -w -local github.com/go-redis/redis ./ | |||||
go_mod_tidy: | |||||
go get -u && go mod tidy | |||||
set -e; for dir in $(PACKAGE_DIRS); do \ | |||||
echo "go mod tidy in $${dir}"; \ | |||||
(cd "$${dir}" && \ | |||||
go get -u && \ | |||||
go mod tidy); \ | |||||
done |
@@ -0,0 +1,178 @@ | |||||
<p align="center"> | |||||
<a href="https://uptrace.dev/?utm_source=gh-redis&utm_campaign=gh-redis-banner1"> | |||||
<img src="https://raw.githubusercontent.com/uptrace/roadmap/master/banner1.png" alt="All-in-one tool to optimize performance and monitor errors & logs"> | |||||
</a> | |||||
</p> | |||||
# Redis client for Golang | |||||
![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) | |||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) | |||||
[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) | |||||
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) | |||||
- To ask questions, join [Discord](https://discord.gg/rWtp5Aj) or use | |||||
[Discussions](https://github.com/go-redis/redis/discussions). | |||||
- [Newsletter](https://blog.uptrace.dev/pages/newsletter.html) to get latest updates. | |||||
- [Documentation](https://redis.uptrace.dev) | |||||
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) | |||||
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) | |||||
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) | |||||
Other projects you may like: | |||||
- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. | |||||
- [treemux](https://github.com/vmihailenco/treemux) - high-speed, flexible, tree-based HTTP router | |||||
for Go. | |||||
## Ecosystem | |||||
- [Redis Mock](https://github.com/go-redis/redismock). | |||||
- [Distributed Locks](https://github.com/bsm/redislock). | |||||
- [Redis Cache](https://github.com/go-redis/cache). | |||||
- [Rate limiting](https://github.com/go-redis/redis_rate). | |||||
## Features | |||||
- Redis 3 commands except QUIT, MONITOR, and SYNC. | |||||
- Automatic connection pooling with | |||||
[circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. | |||||
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). | |||||
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). | |||||
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and | |||||
[TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). | |||||
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). | |||||
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). | |||||
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). | |||||
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). | |||||
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup) | |||||
without using cluster mode and Redis Sentinel. | |||||
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). | |||||
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation). | |||||
## Installation | |||||
go-redis supports 2 last Go versions and requires a Go version with | |||||
[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go | |||||
module: | |||||
```shell | |||||
go mod init github.com/my/repo | |||||
``` | |||||
And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): | |||||
```shell | |||||
go get github.com/go-redis/redis/v8 | |||||
``` | |||||
## Quickstart | |||||
```go | |||||
import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)
var ctx = context.Background() | |||||
func ExampleClient() { | |||||
rdb := redis.NewClient(&redis.Options{ | |||||
Addr: "localhost:6379", | |||||
Password: "", // no password set | |||||
DB: 0, // use default DB | |||||
}) | |||||
err := rdb.Set(ctx, "key", "value", 0).Err() | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
val, err := rdb.Get(ctx, "key").Result() | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
fmt.Println("key", val) | |||||
val2, err := rdb.Get(ctx, "key2").Result() | |||||
if err == redis.Nil { | |||||
fmt.Println("key2 does not exist") | |||||
} else if err != nil { | |||||
panic(err) | |||||
} else { | |||||
fmt.Println("key2", val2) | |||||
} | |||||
// Output: key value | |||||
// key2 does not exist | |||||
} | |||||
``` | |||||
## Look and feel | |||||
Some corner cases: | |||||
```go | |||||
// SET key value EX 10 NX | |||||
set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() | |||||
// SET key value keepttl NX | |||||
set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() | |||||
// SORT list LIMIT 0 2 ASC | |||||
vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() | |||||
// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 | |||||
vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ | |||||
Min: "-inf", | |||||
Max: "+inf", | |||||
Offset: 0, | |||||
Count: 2, | |||||
}).Result() | |||||
// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM | |||||
vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
	Keys:    []string{"zset1", "zset2"},
	Weights: []int64{2, 3},
}).Result()
// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" | |||||
vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() | |||||
// custom command | |||||
res, err := rdb.Do(ctx, "set", "key", "value").Result() | |||||
``` | |||||
## Run the test | |||||
go-redis will start a redis-server and run the test cases. | |||||
The paths of redis-server bin file and redis config file are defined in `main_test.go`: | |||||
``` | |||||
var ( | |||||
redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) | |||||
redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) | |||||
) | |||||
``` | |||||
For local testing, you can change the variables to refer to your local files, or create a soft link | |||||
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: | |||||
``` | |||||
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src | |||||
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ | |||||
``` | |||||
Lastly, run: | |||||
``` | |||||
go test | |||||
``` | |||||
## Contributors | |||||
Thanks to all the people who already contributed! | |||||
<a href="https://github.com/go-redis/redis/graphs/contributors"> | |||||
<img src="https://contributors-img.web.app/image?repo=go-redis/redis" /> | |||||
</a> |
@@ -0,0 +1,15 @@ | |||||
# Releasing | |||||
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: | |||||
```shell | |||||
TAG=v1.0.0 ./scripts/release.sh | |||||
``` | |||||
2. Open a pull request and wait for the build to finish. | |||||
3. Merge the pull request and run `tag.sh` to create tags for packages: | |||||
```shell | |||||
TAG=v1.0.0 ./scripts/tag.sh | |||||
``` |
@@ -0,0 +1,109 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"sync" | |||||
"sync/atomic" | |||||
) | |||||
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { | |||||
cmd := NewIntCmd(ctx, "dbsize") | |||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { | |||||
var size int64 | |||||
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { | |||||
n, err := master.DBSize(ctx).Result() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
atomic.AddInt64(&size, n) | |||||
return nil | |||||
}) | |||||
if err != nil { | |||||
cmd.SetErr(err) | |||||
} else { | |||||
cmd.val = size | |||||
} | |||||
return nil | |||||
}) | |||||
return cmd | |||||
} | |||||
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { | |||||
cmd := NewStringCmd(ctx, "script", "load", script) | |||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { | |||||
mu := &sync.Mutex{} | |||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { | |||||
val, err := shard.ScriptLoad(ctx, script).Result() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
mu.Lock() | |||||
if cmd.Val() == "" { | |||||
cmd.val = val | |||||
} | |||||
mu.Unlock() | |||||
return nil | |||||
}) | |||||
if err != nil { | |||||
cmd.SetErr(err) | |||||
} | |||||
return nil | |||||
}) | |||||
return cmd | |||||
} | |||||
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { | |||||
cmd := NewStatusCmd(ctx, "script", "flush") | |||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { | |||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { | |||||
return shard.ScriptFlush(ctx).Err() | |||||
}) | |||||
if err != nil { | |||||
cmd.SetErr(err) | |||||
} | |||||
return nil | |||||
}) | |||||
return cmd | |||||
} | |||||
func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { | |||||
args := make([]interface{}, 2+len(hashes)) | |||||
args[0] = "script" | |||||
args[1] = "exists" | |||||
for i, hash := range hashes { | |||||
args[2+i] = hash | |||||
} | |||||
cmd := NewBoolSliceCmd(ctx, args...) | |||||
result := make([]bool, len(hashes)) | |||||
for i := range result { | |||||
result[i] = true | |||||
} | |||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { | |||||
mu := &sync.Mutex{} | |||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { | |||||
val, err := shard.ScriptExists(ctx, hashes...).Result() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
mu.Lock() | |||||
for i, v := range val { | |||||
result[i] = result[i] && v | |||||
} | |||||
mu.Unlock() | |||||
return nil | |||||
}) | |||||
if err != nil { | |||||
cmd.SetErr(err) | |||||
} else { | |||||
cmd.val = result | |||||
} | |||||
return nil | |||||
}) | |||||
return cmd | |||||
} |
@@ -0,0 +1,4 @@ | |||||
/* | |||||
Package redis implements a Redis client. | |||||
*/ | |||||
package redis |
@@ -0,0 +1,144 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"io" | |||||
"net" | |||||
"strings" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/proto" | |||||
) | |||||
// ErrClosed performs any operation on the closed client will return this error. | |||||
var ErrClosed = pool.ErrClosed | |||||
type Error interface { | |||||
error | |||||
// RedisError is a no-op function but | |||||
// serves to distinguish types that are Redis | |||||
// errors from ordinary errors: a type is a | |||||
// Redis error if it has a RedisError method. | |||||
RedisError() | |||||
} | |||||
var _ Error = proto.RedisError("") | |||||
func shouldRetry(err error, retryTimeout bool) bool { | |||||
switch err { | |||||
case io.EOF, io.ErrUnexpectedEOF: | |||||
return true | |||||
case nil, context.Canceled, context.DeadlineExceeded: | |||||
return false | |||||
} | |||||
if v, ok := err.(timeoutError); ok { | |||||
if v.Timeout() { | |||||
return retryTimeout | |||||
} | |||||
return true | |||||
} | |||||
s := err.Error() | |||||
if s == "ERR max number of clients reached" { | |||||
return true | |||||
} | |||||
if strings.HasPrefix(s, "LOADING ") { | |||||
return true | |||||
} | |||||
if strings.HasPrefix(s, "READONLY ") { | |||||
return true | |||||
} | |||||
if strings.HasPrefix(s, "CLUSTERDOWN ") { | |||||
return true | |||||
} | |||||
if strings.HasPrefix(s, "TRYAGAIN ") { | |||||
return true | |||||
} | |||||
return false | |||||
} | |||||
// isRedisError reports whether err is a reply error sent by the Redis
// server (proto.RedisError), as opposed to a network or context error.
func isRedisError(err error) bool {
	_, ok := err.(proto.RedisError)
	return ok
}
// isBadConn reports whether the connection that produced err must be
// discarded rather than returned to the pool.
//
// Context cancellation/deadline errors poison the connection (a reply may
// still be in flight). Redis reply errors normally leave the connection
// healthy, except READONLY and same-address MOVED replies, which indicate
// stale routing. Network timeouts are tolerated only when allowTimeout is
// set and the error is temporary; any other error closes the connection.
func isBadConn(err error, allowTimeout bool, addr string) bool {
	switch err {
	case nil:
		return false
	case context.Canceled, context.DeadlineExceeded:
		return true
	}

	if isRedisError(err) {
		switch {
		case isReadOnlyError(err):
			// Close connections in read only state in case domain addr is used
			// and domain resolves to a different Redis Server. See #790.
			return true
		case isMovedSameConnAddr(err, addr):
			// Close connections when we are asked to move to the same addr
			// of the connection. Force a DNS resolution when all connections
			// of the pool are recycled
			return true
		default:
			return false
		}
	}

	if allowTimeout {
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return !netErr.Temporary()
		}
	}

	return true
}
func isMovedError(err error) (moved bool, ask bool, addr string) { | |||||
if !isRedisError(err) { | |||||
return | |||||
} | |||||
s := err.Error() | |||||
switch { | |||||
case strings.HasPrefix(s, "MOVED "): | |||||
moved = true | |||||
case strings.HasPrefix(s, "ASK "): | |||||
ask = true | |||||
default: | |||||
return | |||||
} | |||||
ind := strings.LastIndex(s, " ") | |||||
if ind == -1 { | |||||
return false, false, "" | |||||
} | |||||
addr = s[ind+1:] | |||||
return | |||||
} | |||||
func isLoadingError(err error) bool { | |||||
return strings.HasPrefix(err.Error(), "LOADING ") | |||||
} | |||||
func isReadOnlyError(err error) bool { | |||||
return strings.HasPrefix(err.Error(), "READONLY ") | |||||
} | |||||
func isMovedSameConnAddr(err error, addr string) bool { | |||||
redisError := err.Error() | |||||
if !strings.HasPrefix(redisError, "MOVED ") { | |||||
return false | |||||
} | |||||
return strings.HasSuffix(redisError, addr) | |||||
} | |||||
//------------------------------------------------------------------------------

// timeoutError is the subset of net.Error used to classify timeout
// errors without depending on a concrete error type.
type timeoutError interface {
	Timeout() bool
}
@@ -0,0 +1,56 @@ | |||||
package internal | |||||
import ( | |||||
"fmt" | |||||
"strconv" | |||||
"time" | |||||
) | |||||
// AppendArg appends the string form of a Redis command argument v to b
// and returns the extended buffer. It is used to render commands for
// logging and error messages.
//
// Supported kinds: nil, string, []byte, all fixed and native integer
// types, float32/float64, bool and time.Time (RFC 3339 with
// nanoseconds); anything else falls back to fmt.Sprint.
func AppendArg(b []byte, v interface{}) []byte {
	switch v := v.(type) {
	case nil:
		return append(b, "<nil>"...)
	case string:
		// append with a string source copies the bytes directly; no
		// intermediate conversion helper is needed.
		return append(b, v...)
	case []byte:
		return appendUTF8String(b, v)
	case int:
		return strconv.AppendInt(b, int64(v), 10)
	case int8:
		return strconv.AppendInt(b, int64(v), 10)
	case int16:
		return strconv.AppendInt(b, int64(v), 10)
	case int32:
		return strconv.AppendInt(b, int64(v), 10)
	case int64:
		return strconv.AppendInt(b, v, 10)
	case uint:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint8:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint16:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint32:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint64:
		return strconv.AppendUint(b, v, 10)
	case float32:
		// bitSize 32 makes the value round-trip with the fewest digits
		// (e.g. "1.1") instead of exposing float64 conversion noise
		// ("1.100000023841858") as a 64-bit formatting would.
		return strconv.AppendFloat(b, float64(v), 'f', -1, 32)
	case float64:
		return strconv.AppendFloat(b, v, 'f', -1, 64)
	case bool:
		if v {
			return append(b, "true"...)
		}
		return append(b, "false"...)
	case time.Time:
		return v.AppendFormat(b, time.RFC3339Nano)
	default:
		return append(b, fmt.Sprint(v)...)
	}
}

// appendUTF8String appends src to dst verbatim.
func appendUTF8String(dst []byte, src []byte) []byte {
	dst = append(dst, src...)
	return dst
}
@@ -0,0 +1,78 @@ | |||||
package hashtag | |||||
import ( | |||||
"strings" | |||||
"github.com/go-redis/redis/v8/internal/rand" | |||||
) | |||||
// slotNumber is the fixed number of hash slots in a Redis Cluster.
const slotNumber = 16384

// CRC16 implementation according to CCITT standards.
// Copyright 2001-2010 Georges Menie (www.menie.org)
// Copyright 2013 The Go Authors. All rights reserved.
// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
//
// crc16tab is the 256-entry lookup table driving crc16sum.
var crc16tab = [256]uint16{
	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
}
// Key returns the hash tag of key — the substring between the first '{'
// and the following '}' when that substring is non-empty — or key itself
// when there is no such tag.
func Key(key string) string {
	start := strings.IndexByte(key, '{')
	if start == -1 {
		return key
	}
	if end := strings.IndexByte(key[start+1:], '}'); end > 0 {
		return key[start+1 : start+end+1]
	}
	return key
}
// RandomSlot returns a uniformly random cluster slot in [0, slotNumber),
// used to spread keyless commands across the cluster.
func RandomSlot() int {
	return rand.Intn(slotNumber)
}
// Slot returns a consistent slot number between 0 and 16383
// for any given string key.
//
// Hash tags ({...}) are honored via Key, and an empty key maps to a
// random slot. This mirrors Redis Cluster's CRC16-based slot assignment.
func Slot(key string) int {
	if key == "" {
		return RandomSlot()
	}
	key = Key(key)
	return int(crc16sum(key)) % slotNumber
}
func crc16sum(key string) (crc uint16) { | |||||
for i := 0; i < len(key); i++ { | |||||
crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] | |||||
} | |||||
return | |||||
} |
@@ -0,0 +1,201 @@ | |||||
package hscan | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"reflect" | |||||
"strconv" | |||||
) | |||||
// decoderFunc represents decoding functions for default built-in types.
// It parses s and stores the result in the (settable) reflect.Value.
type decoderFunc func(reflect.Value, string) error

var (
	// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
	// NOTE(review): kinds not listed here (e.g. reflect.Uintptr) are left
	// as nil entries; callers must only dereference entries for kinds
	// that have decoders.
	decoders = []decoderFunc{
		reflect.Bool:          decodeBool,
		reflect.Int:           decodeInt,
		reflect.Int8:          decodeInt8,
		reflect.Int16:         decodeInt16,
		reflect.Int32:         decodeInt32,
		reflect.Int64:         decodeInt64,
		reflect.Uint:          decodeUint,
		reflect.Uint8:         decodeUint8,
		reflect.Uint16:        decodeUint16,
		reflect.Uint32:        decodeUint32,
		reflect.Uint64:        decodeUint64,
		reflect.Float32:       decodeFloat32,
		reflect.Float64:       decodeFloat64,
		reflect.Complex64:     decodeUnsupported,
		reflect.Complex128:    decodeUnsupported,
		reflect.Array:         decodeUnsupported,
		reflect.Chan:          decodeUnsupported,
		reflect.Func:          decodeUnsupported,
		reflect.Interface:     decodeUnsupported,
		reflect.Map:           decodeUnsupported,
		reflect.Ptr:           decodeUnsupported,
		reflect.Slice:         decodeSlice,
		reflect.String:        decodeString,
		reflect.Struct:        decodeUnsupported,
		reflect.UnsafePointer: decodeUnsupported,
	}

	// Global map of struct field specs that is populated once for every new
	// struct type that is scanned. This caches the field types and the corresponding
	// decoder functions to avoid iterating through struct fields on subsequent scans.
	globalStructMap = newStructMap()
)
func Struct(dst interface{}) (StructValue, error) { | |||||
v := reflect.ValueOf(dst) | |||||
// The destination to scan into should be a struct pointer. | |||||
if v.Kind() != reflect.Ptr || v.IsNil() { | |||||
return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst) | |||||
} | |||||
v = v.Elem() | |||||
if v.Kind() != reflect.Struct { | |||||
return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst) | |||||
} | |||||
return StructValue{ | |||||
spec: globalStructMap.get(v.Type()), | |||||
value: v, | |||||
}, nil | |||||
} | |||||
// Scan scans the results from a key-value Redis map result set to a destination struct. | |||||
// The Redis keys are matched to the struct's field with the `redis` tag. | |||||
func Scan(dst interface{}, keys []interface{}, vals []interface{}) error { | |||||
if len(keys) != len(vals) { | |||||
return errors.New("args should have the same number of keys and vals") | |||||
} | |||||
strct, err := Struct(dst) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// Iterate through the (key, value) sequence. | |||||
for i := 0; i < len(vals); i++ { | |||||
key, ok := keys[i].(string) | |||||
if !ok { | |||||
continue | |||||
} | |||||
val, ok := vals[i].(string) | |||||
if !ok { | |||||
continue | |||||
} | |||||
if err := strct.Scan(key, val); err != nil { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} | |||||
func decodeBool(f reflect.Value, s string) error { | |||||
b, err := strconv.ParseBool(s) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
f.SetBool(b) | |||||
return nil | |||||
} | |||||
func decodeInt8(f reflect.Value, s string) error { | |||||
return decodeNumber(f, s, 8) | |||||
} | |||||
func decodeInt16(f reflect.Value, s string) error { | |||||
return decodeNumber(f, s, 16) | |||||
} | |||||
func decodeInt32(f reflect.Value, s string) error { | |||||
return decodeNumber(f, s, 32) | |||||
} | |||||
func decodeInt64(f reflect.Value, s string) error { | |||||
return decodeNumber(f, s, 64) | |||||
} | |||||
func decodeInt(f reflect.Value, s string) error { | |||||
return decodeNumber(f, s, 0) | |||||
} | |||||
func decodeNumber(f reflect.Value, s string, bitSize int) error { | |||||
v, err := strconv.ParseInt(s, 10, bitSize) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
f.SetInt(v) | |||||
return nil | |||||
} | |||||
func decodeUint8(f reflect.Value, s string) error { | |||||
return decodeUnsignedNumber(f, s, 8) | |||||
} | |||||
func decodeUint16(f reflect.Value, s string) error { | |||||
return decodeUnsignedNumber(f, s, 16) | |||||
} | |||||
func decodeUint32(f reflect.Value, s string) error { | |||||
return decodeUnsignedNumber(f, s, 32) | |||||
} | |||||
func decodeUint64(f reflect.Value, s string) error { | |||||
return decodeUnsignedNumber(f, s, 64) | |||||
} | |||||
func decodeUint(f reflect.Value, s string) error { | |||||
return decodeUnsignedNumber(f, s, 0) | |||||
} | |||||
func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error { | |||||
v, err := strconv.ParseUint(s, 10, bitSize) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
f.SetUint(v) | |||||
return nil | |||||
} | |||||
func decodeFloat32(f reflect.Value, s string) error { | |||||
v, err := strconv.ParseFloat(s, 32) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
f.SetFloat(v) | |||||
return nil | |||||
} | |||||
// although the default is float64, but we better define it. | |||||
func decodeFloat64(f reflect.Value, s string) error { | |||||
v, err := strconv.ParseFloat(s, 64) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
f.SetFloat(v) | |||||
return nil | |||||
} | |||||
// decodeString stores s in the string field f as-is.
func decodeString(f reflect.Value, s string) error {
	f.SetString(s)
	return nil
}
func decodeSlice(f reflect.Value, s string) error { | |||||
// []byte slice ([]uint8). | |||||
if f.Type().Elem().Kind() == reflect.Uint8 { | |||||
f.SetBytes([]byte(s)) | |||||
} | |||||
return nil | |||||
} | |||||
// decodeUnsupported rejects struct field kinds that have no decoder.
func decodeUnsupported(v reflect.Value, s string) error {
	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
}
@@ -0,0 +1,93 @@ | |||||
package hscan | |||||
import ( | |||||
"fmt" | |||||
"reflect" | |||||
"strings" | |||||
"sync" | |||||
) | |||||
// structMap contains the map of struct fields for target structs
// indexed by the struct type.
type structMap struct {
	m sync.Map // reflect.Type -> *structSpec
}

// newStructMap returns an empty, ready-to-use structMap.
func newStructMap() *structMap {
	return new(structMap)
}

// get returns the cached structSpec for t, building and caching it on
// first use. Concurrent first calls may each build a spec; the specs are
// equivalent, so the duplicated work is harmless.
func (s *structMap) get(t reflect.Type) *structSpec {
	if v, ok := s.m.Load(t); ok {
		return v.(*structSpec)
	}

	spec := newStructSpec(t, "redis")
	s.m.Store(t, spec)
	return spec
}
//------------------------------------------------------------------------------

// structSpec contains the list of all fields in a target struct.
type structSpec struct {
	m map[string]*structField // tag name -> field info
}

// set registers the field sf under its tag name.
func (s *structSpec) set(tag string, sf *structField) {
	s.m[tag] = sf
}

// newStructSpec builds a structSpec for t by collecting every field
// carrying a non-empty fieldTag tag. Only the part before the first
// comma is used as the key; fields tagged "-" or left untagged are
// skipped.
func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
	numField := t.NumField()
	out := &structSpec{
		m: make(map[string]*structField, numField),
	}

	for i := 0; i < numField; i++ {
		f := t.Field(i)

		tag := f.Tag.Get(fieldTag)
		if tag == "" || tag == "-" {
			continue
		}

		tag = strings.Split(tag, ",")[0]
		if tag == "" {
			continue
		}

		// Use the built-in decoder.
		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
	}

	return out
}
//------------------------------------------------------------------------------

// structField represents a single field in a target struct.
type structField struct {
	index int         // positional index of the field within the struct
	fn    decoderFunc // decoder matching the field's reflect.Kind
}

//------------------------------------------------------------------------------

// StructValue couples a struct's cached field spec with the concrete
// value currently being scanned into.
type StructValue struct {
	spec  *structSpec
	value reflect.Value
}

// Scan decodes value into the struct field tagged with key. Keys without
// a matching tagged field are ignored; a decoder failure is wrapped with
// the field's identity for easier debugging.
func (s StructValue) Scan(key string, value string) error {
	field, ok := s.spec.m[key]
	if !ok {
		return nil
	}
	if err := field.fn(s.value.Field(field.index), value); err != nil {
		t := s.value.Type()
		return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
			value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
	}
	return nil
}
@@ -0,0 +1,29 @@ | |||||
package internal | |||||
import ( | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/rand" | |||||
) | |||||
// RetryBackoff returns the delay to wait before attempt number retry,
// growing exponentially from minBackoff with random jitter and capped at
// maxBackoff. A zero minBackoff disables backoff entirely.
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if retry < 0 {
		panic("not reached")
	}
	if minBackoff == 0 {
		return 0
	}

	// Exponential base: minBackoff * 2^retry.
	d := minBackoff << uint(retry)
	if d < minBackoff {
		// The shift overflowed; clamp to the maximum.
		return maxBackoff
	}

	// Add jitter in [0, d) and clamp the result into [minBackoff, maxBackoff].
	d = minBackoff + time.Duration(rand.Int63n(int64(d)))

	if d > maxBackoff || d < minBackoff {
		d = maxBackoff
	}

	return d
}
@@ -0,0 +1,26 @@ | |||||
package internal | |||||
import ( | |||||
"context" | |||||
"fmt" | |||||
"log" | |||||
"os" | |||||
) | |||||
// Logging is the minimal, context-aware logging interface used by this
// package; callers can substitute their own implementation.
type Logging interface {
	Printf(ctx context.Context, format string, v ...interface{})
}

// logger adapts the standard library *log.Logger to Logging.
type logger struct {
	log *log.Logger
}

// Printf formats like fmt.Printf and writes via the wrapped logger.
// Calldepth 2 makes Lshortfile report the caller, not this wrapper.
func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}

// Logger calls Output to print to the stderr.
// Arguments are handled in the manner of fmt.Print.
var Logger Logging = &logger{
	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}
@@ -0,0 +1,60 @@ | |||||
/* | |||||
Copyright 2014 The Camlistore Authors | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package internal | |||||
import ( | |||||
"sync" | |||||
"sync/atomic" | |||||
) | |||||
// A Once will perform a successful action exactly once.
//
// Unlike a sync.Once, this Once's func returns an error
// and is re-armed on failure.
type Once struct {
	m    sync.Mutex
	done uint32 // set to 1 (atomically) only after f succeeds
}

// Do calls the function f if and only if Do has not been invoked
// without error for this instance of Once. In other words, given
// var once Once
// if once.Do(f) is called multiple times, only the first call will
// invoke f, even if f has a different value in each invocation unless
// f returns an error. A new instance of Once is required for each
// function to execute.
//
// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
// err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
	// Fast path: already succeeded, no locking needed.
	if atomic.LoadUint32(&o.done) == 1 {
		return nil
	}
	// Slow-path.
	o.m.Lock()
	defer o.m.Unlock()
	var err error
	// Re-check under the lock; another goroutine may have succeeded
	// between the atomic load above and acquiring the mutex.
	if o.done == 0 {
		err = f()
		if err == nil {
			// Publish success only after f completes so fast-path readers
			// never observe done=1 before f's effects are visible.
			atomic.StoreUint32(&o.done, 1)
		}
	}
	return err
}
@@ -0,0 +1,121 @@ | |||||
package pool | |||||
import ( | |||||
"bufio" | |||||
"context" | |||||
"net" | |||||
"sync/atomic" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/proto" | |||||
) | |||||
// noDeadline disables read/write deadlines when passed to SetDeadline.
var noDeadline = time.Time{}

// Conn wraps a single network connection to a Redis server together
// with its protocol reader/writer and pool bookkeeping.
type Conn struct {
	usedAt  int64 // atomic; Unix seconds of last use
	netConn net.Conn

	rd *proto.Reader
	bw *bufio.Writer
	wr *proto.Writer

	Inited    bool // presumably set once connection setup completed — confirm with callers
	pooled    bool // whether this connection counts against the pool size
	createdAt time.Time
}

// NewConn wraps netConn with buffered protocol reader/writer state and
// marks it as used now.
func NewConn(netConn net.Conn) *Conn {
	cn := &Conn{
		netConn:   netConn,
		createdAt: time.Now(),
	}
	cn.rd = proto.NewReader(netConn)
	cn.bw = bufio.NewWriter(netConn)
	cn.wr = proto.NewWriter(cn.bw)
	cn.SetUsedAt(time.Now())
	return cn
}
// UsedAt returns the time of last use, with second precision.
func (cn *Conn) UsedAt() time.Time {
	unix := atomic.LoadInt64(&cn.usedAt)
	return time.Unix(unix, 0)
}

// SetUsedAt atomically records tm (truncated to seconds) as the
// last-use time.
func (cn *Conn) SetUsedAt(tm time.Time) {
	atomic.StoreInt64(&cn.usedAt, tm.Unix())
}

// SetNetConn swaps the underlying net.Conn and repoints the protocol
// reader and the buffered writer at it.
func (cn *Conn) SetNetConn(netConn net.Conn) {
	cn.netConn = netConn
	cn.rd.Reset(netConn)
	cn.bw.Reset(netConn)
}

// Write writes b directly to the socket, bypassing the buffered writer.
func (cn *Conn) Write(b []byte) (int, error) {
	return cn.netConn.Write(b)
}

// RemoteAddr returns the peer address, or nil when there is no socket.
func (cn *Conn) RemoteAddr() net.Addr {
	if cn.netConn != nil {
		return cn.netConn.RemoteAddr()
	}
	return nil
}
// WithReader applies the read deadline derived from ctx and timeout,
// then invokes fn with the connection's protocol reader.
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
		return err
	}
	return fn(cn.rd)
}

// WithWriter applies the write deadline, lets fn fill the buffered
// protocol writer, and flushes the buffer to the socket.
func (cn *Conn) WithWriter(
	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
		return err
	}

	// Discard bytes left over from a previous failed write so they are
	// not prepended to this command.
	if cn.bw.Buffered() > 0 {
		cn.bw.Reset(cn.netConn)
	}

	if err := fn(cn.wr); err != nil {
		return err
	}

	return cn.bw.Flush()
}

// Close closes the underlying network connection.
func (cn *Conn) Close() error {
	return cn.netConn.Close()
}
// deadline computes the absolute I/O deadline from the optional timeout
// and the context's deadline — whichever is sooner — and refreshes the
// connection's last-use timestamp as a side effect. It returns the zero
// time (no deadline) when neither is set.
func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
	tm := time.Now()
	cn.SetUsedAt(tm)

	if timeout > 0 {
		tm = tm.Add(timeout)
	}

	if ctx != nil {
		deadline, ok := ctx.Deadline()
		if ok {
			// With no explicit timeout the context deadline wins outright;
			// otherwise use the earlier of the two.
			if timeout == 0 {
				return deadline
			}
			if deadline.Before(tm) {
				return deadline
			}
			return tm
		}
	}

	if timeout > 0 {
		return tm
	}

	return noDeadline
}
@@ -0,0 +1,557 @@ | |||||
package pool | |||||
import ( | |||||
"context" | |||||
"errors" | |||||
"net" | |||||
"sync" | |||||
"sync/atomic" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal" | |||||
) | |||||
var (
	// ErrClosed performs any operation on the closed client will return this error.
	ErrClosed = errors.New("redis: client is closed")

	// ErrPoolTimeout timed out waiting to get a connection from the connection pool.
	ErrPoolTimeout = errors.New("redis: connection pool timeout")
)

// timers recycles stopped timers used while waiting for a free
// connection, avoiding a fresh timer allocation on each wait.
var timers = sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour)
		t.Stop()
		return t
	},
}

// Stats contains pool state information and accumulated stats.
type Stats struct {
	Hits     uint32 // number of times free connection was found in the pool
	Misses   uint32 // number of times free connection was NOT found in the pool
	Timeouts uint32 // number of times a wait timeout occurred

	TotalConns uint32 // number of total connections in the pool
	IdleConns  uint32 // number of idle connections in the pool
	StaleConns uint32 // number of stale connections removed from the pool
}

// Pooler abstracts a connection pool so alternative implementations
// (e.g. single-connection pools) can substitute for ConnPool.
type Pooler interface {
	NewConn(context.Context) (*Conn, error)
	CloseConn(*Conn) error

	Get(context.Context) (*Conn, error)
	Put(context.Context, *Conn)
	Remove(context.Context, *Conn, error)

	Len() int
	IdleLen() int
	Stats() *Stats

	Close() error
}
// Options configures a ConnPool. IdleTimeout and IdleCheckFrequency must
// both be positive for the background reaper goroutine to be started.
type Options struct {
	Dialer  func(context.Context) (net.Conn, error) // opens new connections
	OnClose func(*Conn) error                       // hook invoked when a connection is closed

	PoolFIFO           bool // NOTE(review): presumably FIFO idle-connection pick order — confirm against Get
	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
}

// lastDialErrorWrap boxes the most recent dial error so it can be stored
// in an atomic.Value, which requires a consistent concrete type.
type lastDialErrorWrap struct {
	err error
}

// ConnPool is the default Pooler implementation: a fixed-capacity pool
// with optional pre-dialed idle connections and background reaping.
type ConnPool struct {
	opt *Options

	dialErrorsNum uint32 // atomic
	lastDialError atomic.Value

	queue chan struct{} // capacity PoolSize; presumably a turn-taking semaphore for Get — confirm

	connsMu      sync.Mutex
	conns        []*Conn // every connection managed by the pool
	idleConns    []*Conn // subset of conns currently idle
	poolSize     int
	idleConnsLen int

	stats Stats

	_closed  uint32 // atomic
	closedCh chan struct{}
}

// Compile-time check that ConnPool satisfies Pooler.
var _ Pooler = (*ConnPool)(nil)
// NewConnPool creates a ConnPool from opt, pre-dials MinIdleConns idle
// connections, and starts the stale-connection reaper when both
// IdleTimeout and IdleCheckFrequency are positive.
func NewConnPool(opt *Options) *ConnPool {
	p := &ConnPool{
		opt: opt,

		queue:     make(chan struct{}, opt.PoolSize),
		conns:     make([]*Conn, 0, opt.PoolSize),
		idleConns: make([]*Conn, 0, opt.PoolSize),
		closedCh:  make(chan struct{}),
	}

	// checkMinIdleConns requires connsMu to be held.
	p.connsMu.Lock()
	p.checkMinIdleConns()
	p.connsMu.Unlock()

	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
		go p.reaper(opt.IdleCheckFrequency)
	}

	return p
}
// checkMinIdleConns tops the pool up to MinIdleConns idle connections.
// The caller must hold p.connsMu. Counters are incremented optimistically
// before the asynchronous dial and rolled back if the dial fails.
func (p *ConnPool) checkMinIdleConns() {
	if p.opt.MinIdleConns == 0 {
		return
	}
	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
		p.poolSize++
		p.idleConnsLen++

		go func() {
			err := p.addIdleConn()
			if err != nil && err != ErrClosed {
				// Dial failed: undo the optimistic increments.
				p.connsMu.Lock()
				p.poolSize--
				p.idleConnsLen--
				p.connsMu.Unlock()
			}
		}()
	}
}
// addIdleConn dials one pooled connection and registers it as idle.
// Returns ErrClosed (after closing the new connection) if the pool was
// closed between the dial and the registration.
func (p *ConnPool) addIdleConn() error {
	cn, err := p.dialConn(context.TODO(), true)
	if err != nil {
		return err
	}

	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	// It is not allowed to add new connections to the closed connection pool.
	if p.closed() {
		_ = cn.Close()
		return ErrClosed
	}

	p.conns = append(p.conns, cn)
	p.idleConns = append(p.idleConns, cn)
	return nil
}
// NewConn dials a connection that is tracked by the pool but not pooled
// for reuse (pooled=false): the caller owns its lifecycle.
func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.newConn(ctx, false)
}
// newConn dials and registers a connection. When pooled is true the
// connection counts toward poolSize; if the pool is already full the
// connection is marked pooled=false so Put will discard it.
func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
	cn, err := p.dialConn(ctx, pooled)
	if err != nil {
		return nil, err
	}

	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	// It is not allowed to add new connections to the closed connection pool.
	if p.closed() {
		_ = cn.Close()
		return nil, ErrClosed
	}

	p.conns = append(p.conns, cn)
	if pooled {
		// If pool is full remove the cn on next Put.
		if p.poolSize >= p.opt.PoolSize {
			cn.pooled = false
		} else {
			p.poolSize++
		}
	}

	return cn, nil
}
// dialConn performs the actual dial. It acts as a circuit breaker: once
// PoolSize consecutive dials have failed, it fails fast with the last
// recorded dial error until the background tryDial loop succeeds.
func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
		return nil, p.getLastDialError()
	}

	netConn, err := p.opt.Dialer(ctx)
	if err != nil {
		p.setLastDialError(err)
		// Exactly one caller observes the counter hitting PoolSize and
		// starts the recovery probe.
		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
			go p.tryDial()
		}
		return nil, err
	}

	cn := NewConn(netConn)
	cn.pooled = pooled
	return cn, nil
}
func (p *ConnPool) tryDial() { | |||||
for { | |||||
if p.closed() { | |||||
return | |||||
} | |||||
conn, err := p.opt.Dialer(context.Background()) | |||||
if err != nil { | |||||
p.setLastDialError(err) | |||||
time.Sleep(time.Second) | |||||
continue | |||||
} | |||||
atomic.StoreUint32(&p.dialErrorsNum, 0) | |||||
_ = conn.Close() | |||||
return | |||||
} | |||||
} | |||||
// setLastDialError records err for later retrieval by getLastDialError.
// The wrapper type keeps atomic.Value's concrete type consistent.
func (p *ConnPool) setLastDialError(err error) {
	p.lastDialError.Store(&lastDialErrorWrap{err: err})
}
func (p *ConnPool) getLastDialError() error { | |||||
err, _ := p.lastDialError.Load().(*lastDialErrorWrap) | |||||
if err != nil { | |||||
return err.err | |||||
} | |||||
return nil | |||||
} | |||||
// Get returns existed connection from the pool or creates a new one.
//
// It first acquires a turn on the pool semaphore (bounded by PoolSize
// and PoolTimeout), then pops idle connections, closing stale ones,
// until a healthy one is found; otherwise it dials a new pooled
// connection. The turn is held until Put/Remove, except on dial failure
// where it is released here.
func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	if err := p.waitTurn(ctx); err != nil {
		return nil, err
	}

	for {
		p.connsMu.Lock()
		cn, err := p.popIdle()
		p.connsMu.Unlock()

		if err != nil {
			return nil, err
		}

		if cn == nil {
			// No idle connection available; fall through to dialing.
			break
		}

		if p.isStaleConn(cn) {
			_ = p.CloseConn(cn)
			continue
		}

		atomic.AddUint32(&p.stats.Hits, 1)
		return cn, nil
	}

	atomic.AddUint32(&p.stats.Misses, 1)

	newcn, err := p.newConn(ctx, true)
	if err != nil {
		p.freeTurn()
		return nil, err
	}

	return newcn, nil
}
// getTurn acquires a semaphore slot, blocking without timeout or
// cancellation (used internally by ReapStaleConns).
func (p *ConnPool) getTurn() {
	p.queue <- struct{}{}
}
// waitTurn acquires a semaphore slot, honoring ctx cancellation and the
// PoolTimeout deadline. Fast paths avoid allocating a timer; the slow
// path borrows one from the package-level timers pool (declared
// elsewhere in this package) and must drain it before returning it.
func (p *ConnPool) waitTurn(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	// Non-blocking attempt before arming a timer.
	select {
	case p.queue <- struct{}{}:
		return nil
	default:
	}

	timer := timers.Get().(*time.Timer)
	timer.Reset(p.opt.PoolTimeout)

	select {
	case <-ctx.Done():
		// Drain the timer if it already fired so it can be safely reused.
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return ctx.Err()
	case p.queue <- struct{}{}:
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return nil
	case <-timer.C:
		timers.Put(timer)
		atomic.AddUint32(&p.stats.Timeouts, 1)
		return ErrPoolTimeout
	}
}
// freeTurn releases a semaphore slot acquired by waitTurn/getTurn.
func (p *ConnPool) freeTurn() {
	<-p.queue
}
// popIdle removes and returns one idle connection, or (nil, nil) when
// none is available. FIFO pops the head, LIFO (default) pops the tail.
// The caller must hold p.connsMu.
func (p *ConnPool) popIdle() (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}
	n := len(p.idleConns)
	if n == 0 {
		return nil, nil
	}

	var cn *Conn
	if p.opt.PoolFIFO {
		cn = p.idleConns[0]
		copy(p.idleConns, p.idleConns[1:])
		p.idleConns = p.idleConns[:n-1]
	} else {
		idx := n - 1
		cn = p.idleConns[idx]
		p.idleConns = p.idleConns[:idx]
	}
	p.idleConnsLen--
	// Taking an idle connection may drop us below MinIdleConns.
	p.checkMinIdleConns()
	return cn, nil
}
// Put returns a connection to the idle list and releases its turn.
// Connections with unread buffered data are treated as broken, and
// non-pooled connections (see newConn) are removed instead of reused.
func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
	if cn.rd.Buffered() > 0 {
		internal.Logger.Printf(ctx, "Conn has unread data")
		p.Remove(ctx, cn, BadConnError{})
		return
	}

	if !cn.pooled {
		p.Remove(ctx, cn, nil)
		return
	}

	p.connsMu.Lock()
	p.idleConns = append(p.idleConns, cn)
	p.idleConnsLen++
	p.connsMu.Unlock()
	p.freeTurn()
}
// Remove drops cn from the pool, releases its turn, and closes it.
// The reason argument is accepted for Pooler compatibility but unused here.
func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.removeConnWithLock(cn)
	p.freeTurn()
	_ = p.closeConn(cn)
}
// CloseConn drops cn from the pool and closes it without touching the
// turn semaphore (unlike Remove).
func (p *ConnPool) CloseConn(cn *Conn) error {
	p.removeConnWithLock(cn)
	return p.closeConn(cn)
}
func (p *ConnPool) removeConnWithLock(cn *Conn) { | |||||
p.connsMu.Lock() | |||||
p.removeConn(cn) | |||||
p.connsMu.Unlock() | |||||
} | |||||
// removeConn deletes cn from the tracked connection list and, for
// pooled connections, shrinks poolSize and refills the idle minimum.
// The caller must hold p.connsMu.
func (p *ConnPool) removeConn(cn *Conn) {
	for i, c := range p.conns {
		if c == cn {
			p.conns = append(p.conns[:i], p.conns[i+1:]...)
			if cn.pooled {
				p.poolSize--
				p.checkMinIdleConns()
			}
			return
		}
	}
}
// closeConn invokes the optional OnClose hook (its error is ignored)
// and closes the connection.
func (p *ConnPool) closeConn(cn *Conn) error {
	if p.opt.OnClose != nil {
		_ = p.opt.OnClose(cn)
	}
	return cn.Close()
}
// Len returns total number of connections. | |||||
func (p *ConnPool) Len() int { | |||||
p.connsMu.Lock() | |||||
n := len(p.conns) | |||||
p.connsMu.Unlock() | |||||
return n | |||||
} | |||||
// IdleLen returns number of idle connections. | |||||
func (p *ConnPool) IdleLen() int { | |||||
p.connsMu.Lock() | |||||
n := p.idleConnsLen | |||||
p.connsMu.Unlock() | |||||
return n | |||||
} | |||||
// Stats returns a snapshot of the pool counters. Counter fields are
// read atomically; TotalConns/IdleConns each take the pool lock, so the
// snapshot is not a single atomic view.
func (p *ConnPool) Stats() *Stats {
	idleLen := p.IdleLen()
	return &Stats{
		Hits:     atomic.LoadUint32(&p.stats.Hits),
		Misses:   atomic.LoadUint32(&p.stats.Misses),
		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),

		TotalConns: uint32(p.Len()),
		IdleConns:  uint32(idleLen),
		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
	}
}
// closed reports whether Close has been called on the pool.
func (p *ConnPool) closed() bool {
	return atomic.LoadUint32(&p._closed) == 1
}
// Filter closes every connection for which fn returns true, returning
// the first close error. Note: connections are closed but not removed
// from the pool's bookkeeping slices here.
func (p *ConnPool) Filter(fn func(*Conn) bool) error {
	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	var firstErr error
	for _, cn := range p.conns {
		if fn(cn) {
			if err := p.closeConn(cn); err != nil && firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}
// Close marks the pool closed exactly once (subsequent calls return
// ErrClosed), stops the reaper via closedCh, closes every tracked
// connection, and resets the bookkeeping state. Returns the first
// connection-close error encountered.
func (p *ConnPool) Close() error {
	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
		return ErrClosed
	}
	close(p.closedCh)

	var firstErr error
	p.connsMu.Lock()
	for _, cn := range p.conns {
		if err := p.closeConn(cn); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	p.conns = nil
	p.poolSize = 0
	p.idleConns = nil
	p.idleConnsLen = 0
	p.connsMu.Unlock()

	return firstErr
}
// reaper periodically removes stale idle connections until the pool is
// closed. Started by NewConnPool when IdleTimeout and
// IdleCheckFrequency are both positive.
func (p *ConnPool) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// It is possible that ticker and closedCh arrive together,
			// and select pseudo-randomly pick ticker case, we double
			// check here to prevent being executed after closed.
			if p.closed() {
				return
			}
			_, err := p.ReapStaleConns()
			if err != nil {
				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
				continue
			}
		case <-p.closedCh:
			return
		}
	}
}
// ReapStaleConns closes stale idle connections one at a time, taking a
// semaphore turn per iteration so reaping competes fairly with Get.
// It returns the number of reaped connections; the error is always nil
// in the current implementation.
func (p *ConnPool) ReapStaleConns() (int, error) {
	var n int
	for {
		p.getTurn()

		p.connsMu.Lock()
		cn := p.reapStaleConn()
		p.connsMu.Unlock()

		p.freeTurn()

		if cn != nil {
			_ = p.closeConn(cn)
			n++
		} else {
			break
		}
	}
	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
	return n, nil
}
// reapStaleConn pops the oldest idle connection if it is stale, or
// returns nil. Only the head is examined; once a fresh head is found
// reaping stops. The caller must hold p.connsMu.
func (p *ConnPool) reapStaleConn() *Conn {
	if len(p.idleConns) == 0 {
		return nil
	}

	cn := p.idleConns[0]
	if !p.isStaleConn(cn) {
		return nil
	}

	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
	p.idleConnsLen--
	p.removeConn(cn)

	return cn
}
func (p *ConnPool) isStaleConn(cn *Conn) bool { | |||||
if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { | |||||
return false | |||||
} | |||||
now := time.Now() | |||||
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { | |||||
return true | |||||
} | |||||
if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge { | |||||
return true | |||||
} | |||||
return false | |||||
} |
@@ -0,0 +1,58 @@ | |||||
package pool | |||||
import "context" | |||||
// SingleConnPool is a Pooler that always hands out one fixed connection
// borrowed from an underlying pool. Once Remove or Close is called,
// Get fails with the recorded sticky error.
type SingleConnPool struct {
	pool      Pooler
	cn        *Conn
	stickyErr error
}
// Compile-time check that *SingleConnPool satisfies Pooler.
var _ Pooler = (*SingleConnPool)(nil)
// NewSingleConnPool wraps cn (owned by pool) in a SingleConnPool.
func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
	return &SingleConnPool{
		pool: pool,
		cn:   cn,
	}
}
// NewConn delegates to the underlying pool.
func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}
// CloseConn delegates to the underlying pool.
func (p *SingleConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}
func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) { | |||||
if p.stickyErr != nil { | |||||
return nil, p.stickyErr | |||||
} | |||||
return p.cn, nil | |||||
} | |||||
// Put is a no-op: the single connection stays checked out for reuse.
func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
// Remove forgets the connection and records reason as the sticky error
// returned by subsequent Get calls. The connection itself is not closed
// here; it remains tracked by the underlying pool.
func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.cn = nil
	p.stickyErr = reason
}
// Close forgets the connection and makes subsequent Get calls fail with
// ErrClosed. It never returns an error.
func (p *SingleConnPool) Close() error {
	p.cn = nil
	p.stickyErr = ErrClosed
	return nil
}
// Len always reports 0; this wrapper does not own pooled connections.
func (p *SingleConnPool) Len() int {
	return 0
}
// IdleLen always reports 0; the single connection is never idle here.
func (p *SingleConnPool) IdleLen() int {
	return 0
}
// Stats returns empty statistics; no counters are tracked.
func (p *SingleConnPool) Stats() *Stats {
	return &Stats{}
}
@@ -0,0 +1,201 @@ | |||||
package pool | |||||
import ( | |||||
"context" | |||||
"errors" | |||||
"fmt" | |||||
"sync/atomic" | |||||
) | |||||
// StickyConnPool lifecycle states (stored atomically in StickyConnPool.state).
const (
	stateDefault = 0 // no connection acquired yet
	stateInited  = 1 // a connection has been acquired and is owned/circulating
	stateClosed  = 2 // pool closed; ch is closed
)
// BadConnError marks a connection as unusable, optionally wrapping the
// underlying cause.
type BadConnError struct {
	wrapped error
}
// Compile-time check that *BadConnError satisfies error.
var _ error = (*BadConnError)(nil)
func (e BadConnError) Error() string { | |||||
s := "redis: Conn is in a bad state" | |||||
if e.wrapped != nil { | |||||
s += ": " + e.wrapped.Error() | |||||
} | |||||
return s | |||||
} | |||||
// Unwrap exposes the wrapped cause for errors.Is/errors.As.
func (e BadConnError) Unwrap() error {
	return e.wrapped
}
//------------------------------------------------------------------------------ | |||||
// StickyConnPool pins a single connection from an underlying pool and
// circulates it through a one-slot channel. It is reference-counted
// (shared) so nested NewStickyConnPool calls share one instance.
type StickyConnPool struct {
	pool   Pooler
	shared int32 // atomic; reference count decremented by Close

	state uint32 // atomic; one of stateDefault/stateInited/stateClosed
	ch    chan *Conn // one-slot hand-off for the pinned connection

	_badConnError atomic.Value // stores BadConnError
}
// Compile-time check that *StickyConnPool satisfies Pooler.
var _ Pooler = (*StickyConnPool)(nil)
// NewStickyConnPool wraps pool in a StickyConnPool. If pool is already
// sticky it is reused, only bumping the reference count so a matching
// number of Close calls is required before the connection is released.
func NewStickyConnPool(pool Pooler) *StickyConnPool {
	p, ok := pool.(*StickyConnPool)
	if !ok {
		p = &StickyConnPool{
			pool: pool,
			ch:   make(chan *Conn, 1),
		}
	}
	atomic.AddInt32(&p.shared, 1)
	return p
}
// NewConn delegates to the underlying pool.
func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}
// CloseConn delegates to the underlying pool.
func (p *StickyConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}
// Get returns the pinned connection, acquiring one from the underlying
// pool on first use (stateDefault -> stateInited via CAS). In the
// inited state it waits on the hand-off channel; a closed channel means
// the pool was closed. The loop bound guards against a livelock racing
// with Close.
func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
	// In worst case this races with Close which is not a very common operation.
	for i := 0; i < 1000; i++ {
		switch atomic.LoadUint32(&p.state) {
		case stateDefault:
			cn, err := p.pool.Get(ctx)
			if err != nil {
				return nil, err
			}
			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
				return cn, nil
			}
			// Lost the race (e.g. against Close); give the connection back.
			p.pool.Remove(ctx, cn, ErrClosed)
		case stateInited:
			if err := p.badConnError(); err != nil {
				return nil, err
			}
			cn, ok := <-p.ch
			if !ok {
				return nil, ErrClosed
			}
			return cn, nil
		case stateClosed:
			return nil, ErrClosed
		default:
			panic("not reached")
		}
	}
	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
}
// Put hands the connection back through the channel. If the channel was
// closed concurrently by Close, the send panics; the recover releases
// the connection to the underlying pool instead.
func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
	defer func() {
		if recover() != nil {
			p.freeConn(ctx, cn)
		}
	}()
	p.ch <- cn
}
// freeConn returns cn to the underlying pool: removed if a bad-conn
// error was recorded, re-pooled otherwise.
func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
	if err := p.badConnError(); err != nil {
		p.pool.Remove(ctx, cn, err)
	} else {
		p.pool.Put(ctx, cn)
	}
}
// Remove records reason as the sticky bad-conn error and circulates the
// connection back through the channel so the next Get/Close observes
// the error. If the channel was closed concurrently, the recover hands
// the connection to the underlying pool instead.
func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	defer func() {
		if recover() != nil {
			p.pool.Remove(ctx, cn, ErrClosed)
		}
	}()
	p._badConnError.Store(BadConnError{wrapped: reason})
	p.ch <- cn
}
// Close decrements the reference count; the last reference transitions
// the state to stateClosed, closes the hand-off channel, and releases
// any connection still parked in it. Closing an already-closed pool
// returns ErrClosed. The loop bound guards against CAS livelock.
func (p *StickyConnPool) Close() error {
	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
		return nil
	}

	for i := 0; i < 1000; i++ {
		state := atomic.LoadUint32(&p.state)
		if state == stateClosed {
			return ErrClosed
		}
		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
			close(p.ch)
			// Drain a connection that may still be parked in the channel.
			cn, ok := <-p.ch
			if ok {
				p.freeConn(context.TODO(), cn)
			}
			return nil
		}
	}

	return errors.New("redis: StickyConnPool.Close: infinite loop")
}
// Reset discards a connection previously marked bad (via Remove),
// clears the sticky error, and returns the pool to stateDefault so the
// next Get acquires a fresh connection. It is a no-op when no bad-conn
// error is recorded.
func (p *StickyConnPool) Reset(ctx context.Context) error {
	if p.badConnError() == nil {
		return nil
	}

	select {
	case cn, ok := <-p.ch:
		if !ok {
			return ErrClosed
		}
		p.pool.Remove(ctx, cn, ErrClosed)
		// A zero-wrapped BadConnError reads as "no error" in badConnError.
		p._badConnError.Store(BadConnError{wrapped: nil})
	default:
		return errors.New("redis: StickyConnPool does not have a Conn")
	}

	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
		state := atomic.LoadUint32(&p.state)
		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
	}

	return nil
}
// badConnError returns the recorded BadConnError, treating a stored
// value with a nil wrapped cause (set by Reset) as "no error".
func (p *StickyConnPool) badConnError() error {
	if v := p._badConnError.Load(); v != nil {
		if err := v.(BadConnError); err.wrapped != nil {
			return err
		}
	}
	return nil
}
func (p *StickyConnPool) Len() int { | |||||
switch atomic.LoadUint32(&p.state) { | |||||
case stateDefault: | |||||
return 0 | |||||
case stateInited: | |||||
return 1 | |||||
case stateClosed: | |||||
return 0 | |||||
default: | |||||
panic("not reached") | |||||
} | |||||
} | |||||
// IdleLen reports whether a connection is currently parked in the
// hand-off channel (0 or 1).
func (p *StickyConnPool) IdleLen() int {
	return len(p.ch)
}
// Stats returns empty statistics; no counters are tracked.
func (p *StickyConnPool) Stats() *Stats {
	return &Stats{}
}
@@ -0,0 +1,332 @@ | |||||
package proto | |||||
import ( | |||||
"bufio" | |||||
"fmt" | |||||
"io" | |||||
"github.com/go-redis/redis/v8/internal/util" | |||||
) | |||||
// redis resp protocol data type.
// These are the RESP2 type-marker bytes that prefix every reply line.
const (
	ErrorReply  = '-' // error reply
	StatusReply = '+' // simple string (status) reply
	IntReply    = ':' // integer reply
	StringReply = '$' // bulk string reply (length-prefixed)
	ArrayReply  = '*' // array reply (element-count-prefixed)
)
//------------------------------------------------------------------------------

// Nil is returned for RESP nil bulk-string and nil array replies.
const Nil = RedisError("redis: nil") // nolint:errname

// RedisError is an error reply sent by the Redis server.
type RedisError string

func (e RedisError) Error() string { return string(e) }

// RedisError marks the type as a server-originated error (marker method).
func (RedisError) RedisError() {}
//------------------------------------------------------------------------------

// MultiBulkParse parses n elements of an array reply from the Reader.
type MultiBulkParse func(*Reader, int64) (interface{}, error)
// Reader parses RESP replies from a buffered stream.
type Reader struct {
	rd   *bufio.Reader
	_buf []byte // reusable scratch buffer for readTmpBytesReply; see buf()
}
// NewReader wraps rd in a RESP Reader with a 64-byte scratch buffer.
func NewReader(rd io.Reader) *Reader {
	return &Reader{
		rd:   bufio.NewReader(rd),
		_buf: make([]byte, 64),
	}
}
// Buffered reports the number of bytes readable from the internal buffer.
func (r *Reader) Buffered() int {
	return r.rd.Buffered()
}
// Peek returns the next n bytes without consuming them.
func (r *Reader) Peek(n int) ([]byte, error) {
	return r.rd.Peek(n)
}
// Reset discards any buffered data and switches the Reader to read from rd.
func (r *Reader) Reset(rd io.Reader) {
	r.rd.Reset(rd)
}
func (r *Reader) ReadLine() ([]byte, error) { | |||||
line, err := r.readLine() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if isNilReply(line) { | |||||
return nil, Nil | |||||
} | |||||
return line, nil | |||||
} | |||||
// readLine that returns an error if:
//   - there is a pending read error;
//   - or line does not end with \r\n.
// Lines longer than the bufio buffer are handled by falling back to
// ReadBytes and stitching the two pieces together.
func (r *Reader) readLine() ([]byte, error) {
	b, err := r.rd.ReadSlice('\n')
	if err != nil {
		if err != bufio.ErrBufferFull {
			return nil, err
		}

		// The line overflowed the bufio buffer: copy what we have (the
		// slice is invalidated by the next read) and read the remainder.
		full := make([]byte, len(b))
		copy(full, b)

		b, err = r.rd.ReadBytes('\n')
		if err != nil {
			return nil, err
		}

		full = append(full, b...) //nolint:makezero
		b = full
	}
	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
		return nil, fmt.Errorf("redis: invalid reply: %q", b)
	}
	return b[:len(b)-2], nil
}
// ReadReply reads any RESP reply, dispatching on the type marker.
// Array replies are delegated to m; passing a nil m for an array reply
// is an error.
func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}

	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	case StringReply:
		return r.readStringReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		if m == nil {
			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
			return nil, err
		}
		return m(r, n)
	}
	return nil, fmt.Errorf("redis: can't parse %.100q", line)
}
// ReadIntReply reads an integer reply, surfacing server error replies.
func (r *Reader) ReadIntReply() (int64, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	default:
		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
	}
}
// ReadString reads a reply as a string, accepting bulk-string, status,
// and integer replies.
func (r *Reader) ReadString() (string, error) {
	line, err := r.ReadLine()
	if err != nil {
		return "", err
	}
	switch line[0] {
	case ErrorReply:
		return "", ParseErrorReply(line)
	case StringReply:
		return r.readStringReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		// Return the raw digits as a string.
		return string(line[1:]), nil
	default:
		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
	}
}
// readStringReply reads the payload of a bulk-string reply whose length
// header is in line, including the trailing CRLF, and returns the
// payload as a string. A freshly allocated buffer is used (not the
// scratch buffer) because the bytes back the returned string.
func (r *Reader) readStringReply(line []byte) (string, error) {
	if isNilReply(line) {
		return "", Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return "", err
	}

	b := make([]byte, replyLen+2)
	_, err = io.ReadFull(r.rd, b)
	if err != nil {
		return "", err
	}

	return util.BytesToString(b[:replyLen]), nil
}
// ReadArrayReply reads an array reply, delegating element parsing to m.
func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		return m(r, n)
	default:
		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}
// ReadArrayLen reads an array reply header and returns its element count.
func (r *Reader) ReadArrayLen() (int, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return 0, err
		}
		return int(n), nil
	default:
		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}
func (r *Reader) ReadScanReply() ([]string, uint64, error) { | |||||
n, err := r.ReadArrayLen() | |||||
if err != nil { | |||||
return nil, 0, err | |||||
} | |||||
if n != 2 { | |||||
return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) | |||||
} | |||||
cursor, err := r.ReadUint() | |||||
if err != nil { | |||||
return nil, 0, err | |||||
} | |||||
n, err = r.ReadArrayLen() | |||||
if err != nil { | |||||
return nil, 0, err | |||||
} | |||||
keys := make([]string, n) | |||||
for i := 0; i < n; i++ { | |||||
key, err := r.ReadString() | |||||
if err != nil { | |||||
return nil, 0, err | |||||
} | |||||
keys[i] = key | |||||
} | |||||
return keys, cursor, err | |||||
} | |||||
// ReadInt reads a reply (int/status/bulk-string) and parses it as int64.
func (r *Reader) ReadInt() (int64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseInt(b, 10, 64)
}
// ReadUint reads a reply (int/status/bulk-string) and parses it as uint64.
func (r *Reader) ReadUint() (uint64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseUint(b, 10, 64)
}
// ReadFloatReply reads a reply and parses it as float64.
func (r *Reader) ReadFloatReply() (float64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseFloat(b, 64)
}
// readTmpBytesReply reads a reply into bytes that are only valid until
// the next read (bulk strings land in the shared scratch buffer).
func (r *Reader) readTmpBytesReply() ([]byte, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StringReply:
		return r._readTmpBytesReply(line)
	case StatusReply:
		return line[1:], nil
	default:
		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
	}
}
// _readTmpBytesReply reads a bulk-string payload (plus trailing CRLF)
// into the shared scratch buffer and returns the payload slice. The
// returned bytes are invalidated by the next call.
func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
	if isNilReply(line) {
		return nil, Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return nil, err
	}

	buf := r.buf(replyLen + 2)
	_, err = io.ReadFull(r.rd, buf)
	if err != nil {
		return nil, err
	}

	return buf[:replyLen], nil
}
func (r *Reader) buf(n int) []byte { | |||||
if n <= cap(r._buf) { | |||||
return r._buf[:n] | |||||
} | |||||
d := n - cap(r._buf) | |||||
r._buf = append(r._buf, make([]byte, d)...) | |||||
return r._buf | |||||
} | |||||
func isNilReply(b []byte) bool { | |||||
return len(b) == 3 && | |||||
(b[0] == StringReply || b[0] == ArrayReply) && | |||||
b[1] == '-' && b[2] == '1' | |||||
} | |||||
// ParseErrorReply converts an error-reply line (leading '-' stripped)
// into a RedisError.
func ParseErrorReply(line []byte) error {
	return RedisError(string(line[1:]))
}
// parseArrayLen extracts the element count from an array reply header,
// mapping the nil-array marker to the Nil error.
func parseArrayLen(line []byte) (int64, error) {
	if isNilReply(line) {
		return 0, Nil
	}
	return util.ParseInt(line[1:], 10, 64)
}
@@ -0,0 +1,172 @@ | |||||
package proto | |||||
import ( | |||||
"encoding" | |||||
"fmt" | |||||
"reflect" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/util" | |||||
) | |||||
// Scan parses bytes `b` to `v` with appropriate type. | |||||
func Scan(b []byte, v interface{}) error { | |||||
switch v := v.(type) { | |||||
case nil: | |||||
return fmt.Errorf("redis: Scan(nil)") | |||||
case *string: | |||||
*v = util.BytesToString(b) | |||||
return nil | |||||
case *[]byte: | |||||
*v = b | |||||
return nil | |||||
case *int: | |||||
var err error | |||||
*v, err = util.Atoi(b) | |||||
return err | |||||
case *int8: | |||||
n, err := util.ParseInt(b, 10, 8) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = int8(n) | |||||
return nil | |||||
case *int16: | |||||
n, err := util.ParseInt(b, 10, 16) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = int16(n) | |||||
return nil | |||||
case *int32: | |||||
n, err := util.ParseInt(b, 10, 32) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = int32(n) | |||||
return nil | |||||
case *int64: | |||||
n, err := util.ParseInt(b, 10, 64) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = n | |||||
return nil | |||||
case *uint: | |||||
n, err := util.ParseUint(b, 10, 64) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = uint(n) | |||||
return nil | |||||
case *uint8: | |||||
n, err := util.ParseUint(b, 10, 8) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = uint8(n) | |||||
return nil | |||||
case *uint16: | |||||
n, err := util.ParseUint(b, 10, 16) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = uint16(n) | |||||
return nil | |||||
case *uint32: | |||||
n, err := util.ParseUint(b, 10, 32) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = uint32(n) | |||||
return nil | |||||
case *uint64: | |||||
n, err := util.ParseUint(b, 10, 64) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = n | |||||
return nil | |||||
case *float32: | |||||
n, err := util.ParseFloat(b, 32) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
*v = float32(n) | |||||
return err | |||||
case *float64: | |||||
var err error | |||||
*v, err = util.ParseFloat(b, 64) | |||||
return err | |||||
case *bool: | |||||
*v = len(b) == 1 && b[0] == '1' | |||||
return nil | |||||
case *time.Time: | |||||
var err error | |||||
*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b)) | |||||
return err | |||||
case encoding.BinaryUnmarshaler: | |||||
return v.UnmarshalBinary(b) | |||||
default: | |||||
return fmt.Errorf( | |||||
"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v) | |||||
} | |||||
} | |||||
// ScanSlice scans each string in data into successive elements of
// *slice, which must be a non-nil pointer to a slice. Existing elements
// within capacity are reused; new ones are appended as needed.
func ScanSlice(data []string, slice interface{}) error {
	v := reflect.ValueOf(slice)
	if !v.IsValid() {
		return fmt.Errorf("redis: ScanSlice(nil)")
	}
	if v.Kind() != reflect.Ptr {
		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
	}
	v = v.Elem()
	if v.Kind() != reflect.Slice {
		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
	}

	next := makeSliceNextElemFunc(v)
	for i, s := range data {
		elem := next()
		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
			return err
		}
	}

	return nil
}
func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value { | |||||
elemType := v.Type().Elem() | |||||
if elemType.Kind() == reflect.Ptr { | |||||
elemType = elemType.Elem() | |||||
return func() reflect.Value { | |||||
if v.Len() < v.Cap() { | |||||
v.Set(v.Slice(0, v.Len()+1)) | |||||
elem := v.Index(v.Len() - 1) | |||||
if elem.IsNil() { | |||||
elem.Set(reflect.New(elemType)) | |||||
} | |||||
return elem.Elem() | |||||
} | |||||
elem := reflect.New(elemType) | |||||
v.Set(reflect.Append(v, elem)) | |||||
return elem.Elem() | |||||
} | |||||
} | |||||
zero := reflect.Zero(elemType) | |||||
return func() reflect.Value { | |||||
if v.Len() < v.Cap() { | |||||
v.Set(v.Slice(0, v.Len()+1)) | |||||
return v.Index(v.Len() - 1) | |||||
} | |||||
v.Set(reflect.Append(v, zero)) | |||||
return v.Index(v.Len() - 1) | |||||
} | |||||
} |
@@ -0,0 +1,153 @@ | |||||
package proto | |||||
import ( | |||||
"encoding" | |||||
"fmt" | |||||
"io" | |||||
"strconv" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/util" | |||||
) | |||||
// writer is the sink Writer needs: byte-, buffer-, and string-level
// writes (satisfied by *bufio.Writer and bytes.Buffer, among others).
type writer interface {
	io.Writer
	io.ByteWriter
	// io.StringWriter
	WriteString(s string) (n int, err error)
}
// Writer serializes commands into the RESP wire format.
type Writer struct {
	writer

	lenBuf []byte // scratch for length headers
	numBuf []byte // scratch for numeric/time arguments
}
// NewWriter wraps wr in a RESP Writer with preallocated scratch buffers.
func NewWriter(wr writer) *Writer {
	return &Writer{
		writer: wr,

		lenBuf: make([]byte, 64),
		numBuf: make([]byte, 64),
	}
}
// WriteArgs writes args as a RESP array: the '*' header with the
// element count followed by each argument as a bulk string.
func (w *Writer) WriteArgs(args []interface{}) error {
	if err := w.WriteByte(ArrayReply); err != nil {
		return err
	}

	if err := w.writeLen(len(args)); err != nil {
		return err
	}

	for _, arg := range args {
		if err := w.WriteArg(arg); err != nil {
			return err
		}
	}

	return nil
}
// writeLen writes n in decimal followed by CRLF, reusing lenBuf.
func (w *Writer) writeLen(n int) error {
	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
	w.lenBuf = append(w.lenBuf, '\r', '\n')
	_, err := w.Write(w.lenBuf)
	return err
}
// WriteArg writes a single argument as a RESP bulk string, converting
// Go scalars to their textual form: nil becomes an empty string, bools
// become "1"/"0", time.Time uses RFC3339Nano, and anything else must
// implement encoding.BinaryMarshaler.
func (w *Writer) WriteArg(v interface{}) error {
	switch v := v.(type) {
	case nil:
		return w.string("")
	case string:
		return w.string(v)
	case []byte:
		return w.bytes(v)
	case int:
		return w.int(int64(v))
	case int8:
		return w.int(int64(v))
	case int16:
		return w.int(int64(v))
	case int32:
		return w.int(int64(v))
	case int64:
		return w.int(v)
	case uint:
		return w.uint(uint64(v))
	case uint8:
		return w.uint(uint64(v))
	case uint16:
		return w.uint(uint64(v))
	case uint32:
		return w.uint(uint64(v))
	case uint64:
		return w.uint(v)
	case float32:
		return w.float(float64(v))
	case float64:
		return w.float(v)
	case bool:
		if v {
			return w.int(1)
		}
		return w.int(0)
	case time.Time:
		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
		return w.bytes(w.numBuf)
	case encoding.BinaryMarshaler:
		b, err := v.MarshalBinary()
		if err != nil {
			return err
		}
		return w.bytes(b)
	default:
		return fmt.Errorf(
			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
	}
}
func (w *Writer) bytes(b []byte) error { | |||||
if err := w.WriteByte(StringReply); err != nil { | |||||
return err | |||||
} | |||||
if err := w.writeLen(len(b)); err != nil { | |||||
return err | |||||
} | |||||
if _, err := w.Write(b); err != nil { | |||||
return err | |||||
} | |||||
return w.crlf() | |||||
} | |||||
func (w *Writer) string(s string) error { | |||||
return w.bytes(util.StringToBytes(s)) | |||||
} | |||||
func (w *Writer) uint(n uint64) error { | |||||
w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10) | |||||
return w.bytes(w.numBuf) | |||||
} | |||||
func (w *Writer) int(n int64) error { | |||||
w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10) | |||||
return w.bytes(w.numBuf) | |||||
} | |||||
func (w *Writer) float(f float64) error { | |||||
w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64) | |||||
return w.bytes(w.numBuf) | |||||
} | |||||
func (w *Writer) crlf() error { | |||||
if err := w.WriteByte('\r'); err != nil { | |||||
return err | |||||
} | |||||
return w.WriteByte('\n') | |||||
} |
@@ -0,0 +1,50 @@ | |||||
package rand | |||||
import ( | |||||
"math/rand" | |||||
"sync" | |||||
) | |||||
// Int returns a non-negative pseudo-random int from the package-level
// mutex-protected source.
func Int() int { return pseudo.Int() }

// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Intn(n int) int { return pseudo.Intn(n) }

// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Int63n(n int64) int64 { return pseudo.Int63n(n) }

// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
func Perm(n int) []int { return pseudo.Perm(n) }

// Seed uses the provided seed value to initialize the default Source to a
// deterministic state. If Seed is not called, the generator behaves as if
// seeded by Seed(1).
func Seed(n int64) { pseudo.Seed(n) }
var pseudo = rand.New(&source{src: rand.NewSource(1)}) | |||||
type source struct { | |||||
src rand.Source | |||||
mu sync.Mutex | |||||
} | |||||
func (s *source) Int63() int64 { | |||||
s.mu.Lock() | |||||
n := s.src.Int63() | |||||
s.mu.Unlock() | |||||
return n | |||||
} | |||||
func (s *source) Seed(seed int64) { | |||||
s.mu.Lock() | |||||
s.src.Seed(seed) | |||||
s.mu.Unlock() | |||||
} | |||||
// Shuffle pseudo-randomizes the order of elements using the package-level
// locked source.
// n is the number of elements.
// swap swaps the elements with indexes i and j.
func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
@@ -0,0 +1,12 @@ | |||||
//go:build appengine | |||||
// +build appengine | |||||
package internal | |||||
// String converts a byte slice to a string by copying (the zero-copy
// unsafe conversion is not available under the appengine build tag).
func String(b []byte) string {
	return string(b)
}

// Bytes converts a string to a byte slice by copying.
func Bytes(s string) []byte {
	return []byte(s)
}
@@ -0,0 +1,21 @@ | |||||
//go:build !appengine | |||||
// +build !appengine | |||||
package internal | |||||
import "unsafe" | |||||
// String converts byte slice to string.
// The conversion is zero-copy: the returned string shares b's backing
// array, so b must not be modified afterwards.
func String(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// Bytes converts string to byte slice.
// It builds a slice header over the string's bytes without copying; the
// result must be treated as read-only because strings are immutable.
func Bytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
@@ -0,0 +1,46 @@ | |||||
package internal | |||||
import ( | |||||
"context" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/util" | |||||
) | |||||
func Sleep(ctx context.Context, dur time.Duration) error { | |||||
t := time.NewTimer(dur) | |||||
defer t.Stop() | |||||
select { | |||||
case <-t.C: | |||||
return nil | |||||
case <-ctx.Done(): | |||||
return ctx.Err() | |||||
} | |||||
} | |||||
func ToLower(s string) string { | |||||
if isLower(s) { | |||||
return s | |||||
} | |||||
b := make([]byte, len(s)) | |||||
for i := range b { | |||||
c := s[i] | |||||
if c >= 'A' && c <= 'Z' { | |||||
c += 'a' - 'A' | |||||
} | |||||
b[i] = c | |||||
} | |||||
return util.BytesToString(b) | |||||
} | |||||
// isLower reports whether s contains no upper-case ASCII letters.
func isLower(s string) bool {
	for _, c := range []byte(s) {
		if c >= 'A' && c <= 'Z' {
			return false
		}
	}
	return true
}
@@ -0,0 +1,12 @@ | |||||
//go:build appengine | |||||
// +build appengine | |||||
package util | |||||
// BytesToString converts a byte slice to a string by copying (the
// zero-copy unsafe conversion is not available under appengine).
func BytesToString(b []byte) string {
	return string(b)
}

// StringToBytes converts a string to a byte slice by copying.
func StringToBytes(s string) []byte {
	return []byte(s)
}
@@ -0,0 +1,19 @@ | |||||
package util | |||||
import "strconv" | |||||
// Atoi is strconv.Atoi for a byte slice, going through BytesToString so
// no copy is made on platforms with the zero-copy conversion.
func Atoi(b []byte) (int, error) {
	return strconv.Atoi(BytesToString(b))
}

// ParseInt is strconv.ParseInt for a byte slice.
func ParseInt(b []byte, base int, bitSize int) (int64, error) {
	return strconv.ParseInt(BytesToString(b), base, bitSize)
}

// ParseUint is strconv.ParseUint for a byte slice.
func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
	return strconv.ParseUint(BytesToString(b), base, bitSize)
}

// ParseFloat is strconv.ParseFloat for a byte slice.
func ParseFloat(b []byte, bitSize int) (float64, error) {
	return strconv.ParseFloat(BytesToString(b), bitSize)
}
@@ -0,0 +1,23 @@ | |||||
//go:build !appengine | |||||
// +build !appengine | |||||
package util | |||||
import ( | |||||
"unsafe" | |||||
) | |||||
// BytesToString converts byte slice to string.
// The conversion is zero-copy: the returned string shares b's backing
// array, so b must not be modified afterwards.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// StringToBytes converts string to byte slice.
// It builds a slice header over the string's bytes without copying; the
// result must be treated as read-only because strings are immutable.
func StringToBytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
@@ -0,0 +1,77 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"sync" | |||||
) | |||||
// ScanIterator is used to incrementally iterate over a collection of elements.
// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
	mu  sync.Mutex // protects Scanner and pos
	cmd *ScanCmd   // underlying SCAN command; holds the current page and cursor
	pos int        // 1-based position within cmd.page; 0 before the first Next
}
// Err returns the last iterator error, if any. | |||||
func (it *ScanIterator) Err() error { | |||||
it.mu.Lock() | |||||
err := it.cmd.Err() | |||||
it.mu.Unlock() | |||||
return err | |||||
} | |||||
// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next(ctx context.Context) bool {
	it.mu.Lock()
	defer it.mu.Unlock()
	// Instantly return on errors.
	if it.cmd.Err() != nil {
		return false
	}
	// Advance cursor, check if we are still within range.
	if it.pos < len(it.cmd.page) {
		it.pos++
		return true
	}
	for {
		// Return if there is no more data to fetch.
		if it.cmd.cursor == 0 {
			return false
		}
		// Fetch next page by rewriting the cursor argument in place.
		// For SCAN/QSCAN the cursor directly follows the command name;
		// otherwise an extra argument (presumably the key for
		// HSCAN/SSCAN/ZSCAN — confirm against the command constructors)
		// sits in between, so the cursor is at index 2.
		switch it.cmd.args[0] {
		case "scan", "qscan":
			it.cmd.args[1] = it.cmd.cursor
		default:
			it.cmd.args[2] = it.cmd.cursor
		}
		err := it.cmd.process(ctx, it.cmd)
		if err != nil {
			return false
		}
		it.pos = 1
		// Redis can occasionally return empty page.
		if len(it.cmd.page) > 0 {
			return true
		}
	}
}
// Val returns the key/field at the current cursor position. | |||||
func (it *ScanIterator) Val() string { | |||||
var v string | |||||
it.mu.Lock() | |||||
if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) { | |||||
v = it.cmd.page[it.pos-1] | |||||
} | |||||
it.mu.Unlock() | |||||
return v | |||||
} |
@@ -0,0 +1,429 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"crypto/tls" | |||||
"errors" | |||||
"fmt" | |||||
"net" | |||||
"net/url" | |||||
"runtime" | |||||
"sort" | |||||
"strconv" | |||||
"strings" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
) | |||||
// Limiter is the interface of a rate limiter or a circuit breaker.
type Limiter interface {
	// Allow returns nil if the operation is allowed or an error otherwise.
	// If the operation is allowed the client must call ReportResult with
	// the outcome of the operation, whether it is a success or a failure.
	Allow() error
	// ReportResult reports the result of the previously allowed operation.
	// nil indicates a success, a non-nil error usually indicates a failure.
	ReportResult(result error)
}
// Options keeps the settings to setup redis connection.
type Options struct {
	// The network type, either tcp or unix.
	// Default is tcp.
	Network string
	// host:port address.
	Addr string

	// Dialer creates new network connection and has priority over
	// Network and Addr options.
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// Hook that is called when a new connection is established.
	OnConnect func(ctx context.Context, cn *Conn) error

	// Use the specified Username to authenticate the current connection
	// with one of the connections defined in the ACL list when connecting
	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string
	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
	// or the User Password when connecting to a Redis 6.0 instance, or greater,
	// that is using the Redis ACL system.
	Password string

	// Database to be selected after connecting to the server.
	DB int

	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// Type of connection pool.
	// true for FIFO pool, false for LIFO pool.
	// Note that fifo has higher overhead compared to lifo.
	PoolFIFO bool
	// Maximum number of socket connections.
	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
	PoolSize int
	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	MinIdleConns int
	// Connection age at which client retires (closes) the connection.
	// Default is to not close aged connections.
	MaxConnAge time.Duration
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration
	// Amount of time after which client closes idle connections.
	// Should be less than server's timeout.
	// Default is 5 minutes. -1 disables idle timeout check.
	IdleTimeout time.Duration
	// Frequency of idle checks made by idle connections reaper.
	// Default is 1 minute. -1 disables idle connections reaper,
	// but idle connections are still discarded by the client
	// if IdleTimeout is set.
	IdleCheckFrequency time.Duration

	// Enables read only queries on slave nodes.
	readOnly bool

	// TLS Config to use. When set TLS will be negotiated.
	TLSConfig *tls.Config

	// Limiter interface used to implement a circuit breaker or rate limiter.
	Limiter Limiter
}
// init fills in defaults for unset fields. The value -1 is a sentinel
// meaning "disabled" for retries, backoffs and timeouts and is
// normalized to 0 here; 0 means "use the default".
func (opt *Options) init() {
	if opt.Addr == "" {
		opt.Addr = "localhost:6379"
	}
	if opt.Network == "" {
		// A leading slash in the address means a unix socket path.
		if strings.HasPrefix(opt.Addr, "/") {
			opt.Network = "unix"
		} else {
			opt.Network = "tcp"
		}
	}
	if opt.DialTimeout == 0 {
		opt.DialTimeout = 5 * time.Second
	}
	if opt.Dialer == nil {
		// Default dialer; the closure reads opt.TLSConfig at dial time,
		// so TLS configured later is still honored.
		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
			netDialer := &net.Dialer{
				Timeout:   opt.DialTimeout,
				KeepAlive: 5 * time.Minute,
			}
			if opt.TLSConfig == nil {
				return netDialer.DialContext(ctx, network, addr)
			}
			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
		}
	}
	if opt.PoolSize == 0 {
		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
	}
	switch opt.ReadTimeout {
	case -1:
		opt.ReadTimeout = 0
	case 0:
		opt.ReadTimeout = 3 * time.Second
	}
	// WriteTimeout and PoolTimeout default relative to the already
	// normalized ReadTimeout, so the order of these branches matters.
	switch opt.WriteTimeout {
	case -1:
		opt.WriteTimeout = 0
	case 0:
		opt.WriteTimeout = opt.ReadTimeout
	}
	if opt.PoolTimeout == 0 {
		opt.PoolTimeout = opt.ReadTimeout + time.Second
	}
	if opt.IdleTimeout == 0 {
		opt.IdleTimeout = 5 * time.Minute
	}
	if opt.IdleCheckFrequency == 0 {
		opt.IdleCheckFrequency = time.Minute
	}
	if opt.MaxRetries == -1 {
		opt.MaxRetries = 0
	} else if opt.MaxRetries == 0 {
		opt.MaxRetries = 3
	}
	switch opt.MinRetryBackoff {
	case -1:
		opt.MinRetryBackoff = 0
	case 0:
		opt.MinRetryBackoff = 8 * time.Millisecond
	}
	switch opt.MaxRetryBackoff {
	case -1:
		opt.MaxRetryBackoff = 0
	case 0:
		opt.MaxRetryBackoff = 512 * time.Millisecond
	}
}
func (opt *Options) clone() *Options { | |||||
clone := *opt | |||||
return &clone | |||||
} | |||||
// ParseURL parses a URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
//	redis://<user>:<password>@<host>:<port>/<db_number>
// Unix connection:
//	unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
// Most Option fields can be set using query parameters, with the following restrictions:
//   - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
//   - only scalar type fields are supported (bool, int, time.Duration)
//   - for time.Duration fields, values must be a valid input for time.ParseDuration();
//     additionally a plain integer as value (i.e. without unit) is interpreted as seconds
//   - to disable a duration field, use value less than or equal to 0; to use the default
//     value, leave the value blank or remove the parameter
//   - only the last value is interpreted if a parameter is given multiple times
//   - fields "network", "addr", "username" and "password" can only be set using other
//     URL attributes (scheme, host, userinfo, resp.), query parameters using these
//     names will be treated as unknown parameters
//   - unknown parameter names will result in an error
// Examples:
//	redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
//	is equivalent to:
//	&Options{
//		Network:     "tcp",
//		Addr:        "localhost:6789",
//		DB:          1,               // path "/3" was overridden by "&db=1"
//		DialTimeout: 3 * time.Second, // no time unit = seconds
//		ReadTimeout: 6 * time.Second,
//		MaxRetries:  2,
//	}
func ParseURL(redisURL string) (*Options, error) {
	u, err := url.Parse(redisURL)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "redis", "rediss":
		return setupTCPConn(u)
	case "unix":
		return setupUnixConn(u)
	default:
		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
	}
}
// setupTCPConn builds Options from a redis:// or rediss:// URL:
// credentials from the userinfo, host/port (with localhost:6379
// defaults), the database number from the path, and TLS for rediss.
func setupTCPConn(u *url.URL) (*Options, error) {
	o := &Options{Network: "tcp"}
	o.Username, o.Password = getUserPassword(u)
	h, p, err := net.SplitHostPort(u.Host)
	if err != nil {
		// No port present: treat the whole host component as the
		// hostname and fall back to the default port below.
		h = u.Host
	}
	if h == "" {
		h = "localhost"
	}
	if p == "" {
		p = "6379"
	}
	o.Addr = net.JoinHostPort(h, p)
	// The path may carry at most one element: the database number.
	f := strings.FieldsFunc(u.Path, func(r rune) bool {
		return r == '/'
	})
	switch len(f) {
	case 0:
		o.DB = 0
	case 1:
		if o.DB, err = strconv.Atoi(f[0]); err != nil {
			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
		}
	default:
		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
	}
	if u.Scheme == "rediss" {
		o.TLSConfig = &tls.Config{ServerName: h}
	}
	return setupConnParams(u, o)
}
func setupUnixConn(u *url.URL) (*Options, error) { | |||||
o := &Options{ | |||||
Network: "unix", | |||||
} | |||||
if strings.TrimSpace(u.Path) == "" { // path is required with unix connection | |||||
return nil, errors.New("redis: empty unix socket path") | |||||
} | |||||
o.Addr = u.Path | |||||
o.Username, o.Password = getUserPassword(u) | |||||
return setupConnParams(u, o) | |||||
} | |||||
// queryOptions consumes URL query parameters. Consumed keys are deleted
// from q so leftovers can be reported as unknown; only the first
// conversion error is recorded.
type queryOptions struct {
	q   url.Values
	err error // first conversion error encountered, if any
}
func (o *queryOptions) string(name string) string { | |||||
vs := o.q[name] | |||||
if len(vs) == 0 { | |||||
return "" | |||||
} | |||||
delete(o.q, name) // enable detection of unknown parameters | |||||
return vs[len(vs)-1] | |||||
} | |||||
func (o *queryOptions) int(name string) int { | |||||
s := o.string(name) | |||||
if s == "" { | |||||
return 0 | |||||
} | |||||
i, err := strconv.Atoi(s) | |||||
if err == nil { | |||||
return i | |||||
} | |||||
if o.err == nil { | |||||
o.err = fmt.Errorf("redis: invalid %s number: %s", name, err) | |||||
} | |||||
return 0 | |||||
} | |||||
func (o *queryOptions) duration(name string) time.Duration { | |||||
s := o.string(name) | |||||
if s == "" { | |||||
return 0 | |||||
} | |||||
// try plain number first | |||||
if i, err := strconv.Atoi(s); err == nil { | |||||
if i <= 0 { | |||||
// disable timeouts | |||||
return -1 | |||||
} | |||||
return time.Duration(i) * time.Second | |||||
} | |||||
dur, err := time.ParseDuration(s) | |||||
if err == nil { | |||||
return dur | |||||
} | |||||
if o.err == nil { | |||||
o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err) | |||||
} | |||||
return 0 | |||||
} | |||||
func (o *queryOptions) bool(name string) bool { | |||||
switch s := o.string(name); s { | |||||
case "true", "1": | |||||
return true | |||||
case "false", "0", "": | |||||
return false | |||||
default: | |||||
if o.err == nil { | |||||
o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s) | |||||
} | |||||
return false | |||||
} | |||||
} | |||||
func (o *queryOptions) remaining() []string { | |||||
if len(o.q) == 0 { | |||||
return nil | |||||
} | |||||
keys := make([]string, 0, len(o.q)) | |||||
for k := range o.q { | |||||
keys = append(keys, k) | |||||
} | |||||
sort.Strings(keys) | |||||
return keys | |||||
} | |||||
// setupConnParams converts query parameters in u to option values in o.
// Unknown parameters and conversion failures are reported as errors.
func setupConnParams(u *url.URL, o *Options) (*Options, error) {
	q := queryOptions{q: u.Query()}
	// compat: a future major release may use q.int("db")
	if tmp := q.string("db"); tmp != "" {
		db, err := strconv.Atoi(tmp)
		if err != nil {
			return nil, fmt.Errorf("redis: invalid database number: %w", err)
		}
		o.DB = db
	}
	o.MaxRetries = q.int("max_retries")
	o.MinRetryBackoff = q.duration("min_retry_backoff")
	o.MaxRetryBackoff = q.duration("max_retry_backoff")
	o.DialTimeout = q.duration("dial_timeout")
	o.ReadTimeout = q.duration("read_timeout")
	o.WriteTimeout = q.duration("write_timeout")
	o.PoolFIFO = q.bool("pool_fifo")
	o.PoolSize = q.int("pool_size")
	o.MinIdleConns = q.int("min_idle_conns")
	o.MaxConnAge = q.duration("max_conn_age")
	o.PoolTimeout = q.duration("pool_timeout")
	o.IdleTimeout = q.duration("idle_timeout")
	o.IdleCheckFrequency = q.duration("idle_check_frequency")
	// The first conversion error recorded above, if any, wins.
	if q.err != nil {
		return nil, q.err
	}
	// any parameters left?
	if r := q.remaining(); len(r) > 0 {
		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
	}
	return o, nil
}
func getUserPassword(u *url.URL) (string, string) { | |||||
var user, password string | |||||
if u.User != nil { | |||||
user = u.User.Username() | |||||
if p, ok := u.User.Password(); ok { | |||||
password = p | |||||
} | |||||
} | |||||
return user, password | |||||
} | |||||
// newConnPool builds the connection pool for opt, wiring the pool's
// dialer to opt.Dialer with the configured network and address.
func newConnPool(opt *Options) *pool.ConnPool {
	return pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
			return opt.Dialer(ctx, opt.Network, opt.Addr)
		},
		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
	})
}
@@ -0,0 +1,8 @@ | |||||
{ | |||||
"name": "redis", | |||||
"version": "8.11.4", | |||||
"main": "index.js", | |||||
"repository": "git@github.com:go-redis/redis.git", | |||||
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>", | |||||
"license": "BSD-2-clause" | |||||
} |
@@ -0,0 +1,137 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"sync" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
) | |||||
type pipelineExecer func(context.Context, []Cmder) error | |||||
// Pipeliner is a mechanism to realise the Redis Pipeline technique.
//
// Pipelining is a technique to greatly speed up processing by packing
// operations into batches, sending them at once to Redis and reading the
// replies in a single step.
// See https://redis.io/topics/pipelining
//
// Pay attention that Pipeline is not a transaction, so you can get unexpected
// results in case of big pipelines and small read/write timeouts.
// The Redis client has retransmission logic in case of timeouts: a pipeline
// can be retransmitted and commands can be executed more than once.
// To avoid this it is a good idea to use reasonably big read/write timeouts
// depending on your batch size, and/or to use TxPipeline.
type Pipeliner interface {
	StatefulCmdable
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Close() error
	Discard() error
	Exec(ctx context.Context) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil) | |||||
// Pipeline implements pipelining as described in
// http://redis.io/topics/pipelining. It's safe for concurrent use
// by multiple goroutines.
type Pipeline struct {
	cmdable
	statefulCmdable

	ctx  context.Context
	exec pipelineExecer // sends the queued batch; injected by the client

	mu     sync.Mutex // guards cmds and closed
	cmds   []Cmder    // commands queued since the last Exec/Discard
	closed bool       // set by Close; Exec/Discard return ErrClosed afterwards
}
// init wires the generic command builders to Process so that every
// command invoked on the pipeline is queued instead of sent immediately.
func (c *Pipeline) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd { | |||||
cmd := NewCmd(ctx, args...) | |||||
_ = c.Process(ctx, cmd) | |||||
return cmd | |||||
} | |||||
// Process queues the cmd for later execution. | |||||
func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error { | |||||
c.mu.Lock() | |||||
c.cmds = append(c.cmds, cmd) | |||||
c.mu.Unlock() | |||||
return nil | |||||
} | |||||
// Close closes the pipeline, releasing any open resources. | |||||
func (c *Pipeline) Close() error { | |||||
c.mu.Lock() | |||||
_ = c.discard() | |||||
c.closed = true | |||||
c.mu.Unlock() | |||||
return nil | |||||
} | |||||
// Discard resets the pipeline and discards queued commands. | |||||
func (c *Pipeline) Discard() error { | |||||
c.mu.Lock() | |||||
err := c.discard() | |||||
c.mu.Unlock() | |||||
return err | |||||
} | |||||
func (c *Pipeline) discard() error { | |||||
if c.closed { | |||||
return pool.ErrClosed | |||||
} | |||||
c.cmds = c.cmds[:0] | |||||
return nil | |||||
} | |||||
// Exec executes all previously queued commands using one
// client-server roundtrip.
//
// Exec always returns list of commands and error of the first failed
// command if any.
func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil, pool.ErrClosed
	}
	if len(c.cmds) == 0 {
		return nil, nil
	}
	// Detach the queue before sending so the pipeline can be reused
	// independently of the batch in flight.
	cmds := c.cmds
	c.cmds = nil
	return cmds, c.exec(ctx, cmds)
}
func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
if err := fn(c); err != nil { | |||||
return nil, err | |||||
} | |||||
cmds, err := c.Exec(ctx) | |||||
_ = c.Close() | |||||
return cmds, err | |||||
} | |||||
func (c *Pipeline) Pipeline() Pipeliner { | |||||
return c | |||||
} | |||||
func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
return c.Pipelined(ctx, fn) | |||||
} | |||||
func (c *Pipeline) TxPipeline() Pipeliner { | |||||
return c | |||||
} |
@@ -0,0 +1,668 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"fmt" | |||||
"strings" | |||||
"sync" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/proto" | |||||
) | |||||
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
//
// PubSub automatically reconnects to Redis Server and resubscribes
// to the channels in case of network errors.
type PubSub struct {
	opt *Options

	newConn   func(ctx context.Context, channels []string) (*pool.Conn, error)
	closeConn func(*pool.Conn) error

	mu       sync.Mutex
	cn       *pool.Conn
	channels map[string]struct{} // channel subscriptions, replayed on reconnect
	patterns map[string]struct{} // pattern subscriptions, replayed on reconnect

	closed bool
	exit   chan struct{} // closed by Close to signal shutdown

	cmd *Cmd

	chOnce sync.Once
	msgCh  *channel
	allCh  *channel
}
func (c *PubSub) init() { | |||||
c.exit = make(chan struct{}) | |||||
} | |||||
func (c *PubSub) String() string { | |||||
channels := mapKeys(c.channels) | |||||
channels = append(channels, mapKeys(c.patterns)...) | |||||
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", ")) | |||||
} | |||||
func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) { | |||||
c.mu.Lock() | |||||
cn, err := c.conn(ctx, nil) | |||||
c.mu.Unlock() | |||||
return cn, err | |||||
} | |||||
// conn returns the current connection, dialing a new one when none is
// cached. newChannels are only forwarded to newConn for the dial; the
// tracked channel/pattern sets are replayed via resubscribe.
// The caller must hold c.mu.
func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
	if c.closed {
		return nil, pool.ErrClosed
	}
	if c.cn != nil {
		return c.cn, nil
	}
	channels := mapKeys(c.channels)
	channels = append(channels, newChannels...)
	cn, err := c.newConn(ctx, channels)
	if err != nil {
		return nil, err
	}
	// A connection we cannot resubscribe on is useless: close it.
	if err := c.resubscribe(ctx, cn); err != nil {
		_ = c.closeConn(cn)
		return nil, err
	}
	c.cn = cn
	return cn, nil
}
// writeCmd writes cmd on cn using the configured write timeout.
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmd(wr, cmd)
	})
}
func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error { | |||||
var firstErr error | |||||
if len(c.channels) > 0 { | |||||
firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels)) | |||||
} | |||||
if len(c.patterns) > 0 { | |||||
err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns)) | |||||
if err != nil && firstErr == nil { | |||||
firstErr = err | |||||
} | |||||
} | |||||
return firstErr | |||||
} | |||||
// mapKeys returns the keys of m in unspecified order.
func mapKeys(m map[string]struct{}) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
func (c *PubSub) _subscribe( | |||||
ctx context.Context, cn *pool.Conn, redisCmd string, channels []string, | |||||
) error { | |||||
args := make([]interface{}, 0, 1+len(channels)) | |||||
args = append(args, redisCmd) | |||||
for _, channel := range channels { | |||||
args = append(args, channel) | |||||
} | |||||
cmd := NewSliceCmd(ctx, args...) | |||||
return c.writeCmd(ctx, cn, cmd) | |||||
} | |||||
// releaseConnWithLock is releaseConn guarded by the PubSub mutex.
func (c *PubSub) releaseConnWithLock(
	ctx context.Context,
	cn *pool.Conn,
	err error,
	allowTimeout bool,
) {
	c.mu.Lock()
	c.releaseConn(ctx, cn, err, allowTimeout)
	c.mu.Unlock()
}

// releaseConn inspects the error from an operation on cn and reconnects
// when the connection is deemed broken. It is a no-op when cn is no
// longer the current connection. The caller must hold c.mu.
func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
	if c.cn != cn {
		return
	}
	if isBadConn(err, allowTimeout, c.opt.Addr) {
		c.reconnect(ctx, err)
	}
}
// reconnect drops the current connection and eagerly dials a
// replacement; dial errors are ignored here and surface on the next
// operation. The caller must hold c.mu.
func (c *PubSub) reconnect(ctx context.Context, reason error) {
	_ = c.closeTheCn(reason)
	_, _ = c.conn(ctx, nil)
}

// closeTheCn closes and clears the current connection, if any. The
// reason is only logged while the PubSub itself is still open.
// The caller must hold c.mu.
func (c *PubSub) closeTheCn(reason error) error {
	if c.cn == nil {
		return nil
	}
	if !c.closed {
		internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
	}
	err := c.closeConn(c.cn)
	c.cn = nil
	return err
}
func (c *PubSub) Close() error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
if c.closed { | |||||
return pool.ErrClosed | |||||
} | |||||
c.closed = true | |||||
close(c.exit) | |||||
return c.closeTheCn(pool.ErrClosed) | |||||
} | |||||
// Subscribe the client to the specified channels. It returns | |||||
// empty subscription if there are no channels. | |||||
func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
err := c.subscribe(ctx, "subscribe", channels...) | |||||
if c.channels == nil { | |||||
c.channels = make(map[string]struct{}) | |||||
} | |||||
for _, s := range channels { | |||||
c.channels[s] = struct{}{} | |||||
} | |||||
return err | |||||
} | |||||
// PSubscribe the client to the given patterns. It returns | |||||
// empty subscription if there are no patterns. | |||||
func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
err := c.subscribe(ctx, "psubscribe", patterns...) | |||||
if c.patterns == nil { | |||||
c.patterns = make(map[string]struct{}) | |||||
} | |||||
for _, s := range patterns { | |||||
c.patterns[s] = struct{}{} | |||||
} | |||||
return err | |||||
} | |||||
// Unsubscribe the client from the given channels, or from all of | |||||
// them if none is given. | |||||
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
for _, channel := range channels { | |||||
delete(c.channels, channel) | |||||
} | |||||
err := c.subscribe(ctx, "unsubscribe", channels...) | |||||
return err | |||||
} | |||||
// PUnsubscribe the client from the given patterns, or from all of | |||||
// them if none is given. | |||||
func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
for _, pattern := range patterns { | |||||
delete(c.patterns, pattern) | |||||
} | |||||
err := c.subscribe(ctx, "punsubscribe", patterns...) | |||||
return err | |||||
} | |||||
// subscribe sends redisCmd ("subscribe", "unsubscribe", ...) with the
// given channels on the current connection, dialing one if needed.
// Callers must hold c.mu.
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
    cn, err := c.conn(ctx, channels)
    if err != nil {
        return err
    }
    err = c._subscribe(ctx, cn, redisCmd, channels)
    c.releaseConn(ctx, cn, err, false)
    return err
}
// Ping sends a PING with an optional payload on the PubSub connection.
// The reply arrives asynchronously as a *Pong via Receive*/Channel.
func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
    args := []interface{}{"ping"}
    if len(payload) == 1 {
        args = append(args, payload[0])
    }
    cmd := NewCmd(ctx, args...)
    c.mu.Lock()
    defer c.mu.Unlock()
    cn, err := c.conn(ctx, nil)
    if err != nil {
        return err
    }
    err = c.writeCmd(ctx, cn, cmd)
    c.releaseConn(ctx, cn, err, false)
    return err
}
// Subscription received after a successful (un)subscription to a
// channel or pattern.
type Subscription struct {
    // Kind can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
    Kind string
    // Channel name we have subscribed to (or pattern, for the p-variants).
    Channel string
    // Count is the number of channels we are currently subscribed to.
    Count int
}
func (m *Subscription) String() string { | |||||
return fmt.Sprintf("%s: %s", m.Kind, m.Channel) | |||||
} | |||||
// Message received as result of a PUBLISH command issued by another client.
type Message struct {
    Channel string
    // Pattern is set only for messages delivered via a pattern subscription.
    Pattern string
    Payload string
    // PayloadSlice is set instead of Payload when the server sends an
    // array payload.
    PayloadSlice []string
}
func (m *Message) String() string { | |||||
return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) | |||||
} | |||||
// Pong received as result of a PING command issued by another client.
type Pong struct {
    // Payload is the optional argument that accompanied the PING.
    Payload string
}
func (p *Pong) String() string { | |||||
if p.Payload != "" { | |||||
return fmt.Sprintf("Pong<%s>", p.Payload) | |||||
} | |||||
return "Pong" | |||||
} | |||||
// newMessage converts a raw pub/sub reply into one of the typed
// messages: *Subscription, *Message or *Pong. Replies are either a bare
// string (inline pong) or an array whose first element names the kind.
// It returns an error for reply shapes it does not recognize.
func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
    switch reply := reply.(type) {
    case string:
        return &Pong{
            Payload: reply,
        }, nil
    case []interface{}:
        switch kind := reply[0].(string); kind {
        case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
            // Can be nil in case of "unsubscribe".
            channel, _ := reply[1].(string)
            return &Subscription{
                Kind:    kind,
                Channel: channel,
                Count:   int(reply[2].(int64)),
            }, nil
        case "message":
            // Payload is normally a string but may be an array of strings.
            switch payload := reply[2].(type) {
            case string:
                return &Message{
                    Channel: reply[1].(string),
                    Payload: payload,
                }, nil
            case []interface{}:
                ss := make([]string, len(payload))
                for i, s := range payload {
                    ss[i] = s.(string)
                }
                return &Message{
                    Channel:      reply[1].(string),
                    PayloadSlice: ss,
                }, nil
            default:
                return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
            }
        case "pmessage":
            // Pattern-delivered message: [pmessage, pattern, channel, payload].
            return &Message{
                Pattern: reply[1].(string),
                Channel: reply[2].(string),
                Payload: reply[3].(string),
            }, nil
        case "pong":
            return &Pong{
                Payload: reply[1].(string),
            }, nil
        default:
            return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
        }
    default:
        return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
    }
}
// ReceiveTimeout acts like Receive but returns an error if message
// is not received in time. This is low-level API and in most cases
// Channel should be used instead.
//
// A timeout of 0 blocks indefinitely. A positive timeout is treated as
// expected when it elapses (allowTimeout=true on release), so the
// connection is not discarded.
func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
    // Reuse a single Cmd across calls to avoid per-message allocation.
    if c.cmd == nil {
        c.cmd = NewCmd(ctx)
    }
    // Don't hold the lock to allow subscriptions and pings.
    cn, err := c.connWithLock(ctx)
    if err != nil {
        return nil, err
    }
    err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
        return c.cmd.readReply(rd)
    })
    c.releaseConnWithLock(ctx, cn, err, timeout > 0)
    if err != nil {
        return nil, err
    }
    return c.newMessage(c.cmd.Val())
}
// Receive returns a message as a Subscription, Message, Pong or error.
// See PubSub example for details. This is low-level API and in most cases
// Channel should be used instead.
//
// It blocks until a message arrives (zero timeout).
func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
    return c.ReceiveTimeout(ctx, 0)
}
// ReceiveMessage returns a Message or error ignoring Subscription and Pong | |||||
// messages. This is low-level API and in most cases Channel should be used | |||||
// instead. | |||||
func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) { | |||||
for { | |||||
msg, err := c.Receive(ctx) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
switch msg := msg.(type) { | |||||
case *Subscription: | |||||
// Ignore. | |||||
case *Pong: | |||||
// Ignore. | |||||
case *Message: | |||||
return msg, nil | |||||
default: | |||||
err := fmt.Errorf("redis: unknown message: %T", msg) | |||||
return nil, err | |||||
} | |||||
} | |||||
} | |||||
func (c *PubSub) getContext() context.Context { | |||||
if c.cmd != nil { | |||||
return c.cmd.ctx | |||||
} | |||||
return context.Background() | |||||
} | |||||
//------------------------------------------------------------------------------ | |||||
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// is blocked full for the send timeout (60 seconds by default) the
// message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// (every 3 seconds by default) and reconnects when a ping fails.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
    c.chOnce.Do(func() {
        c.msgCh = newChannel(c, opts...)
        c.msgCh.initMsgChan()
    })
    // msgCh is nil when chOnce was already consumed by ChannelWithSubscriptions.
    if c.msgCh == nil {
        err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
        panic(err)
    }
    return c.msgCh.msgCh
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
//
// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
    return c.Channel(WithChannelSize(size))
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
    c.chOnce.Do(func() {
        c.allCh = newChannel(c, WithChannelSize(size))
        c.allCh.initAllChan()
    })
    // allCh is nil when chOnce was already consumed by Channel/ChannelSize.
    if c.allCh == nil {
        err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
        panic(err)
    }
    return c.allCh.allCh
}
type ChannelOption func(c *channel) | |||||
// WithChannelSize specifies the Go chan size that is used to buffer incoming messages. | |||||
// | |||||
// The default is 100 messages. | |||||
func WithChannelSize(size int) ChannelOption { | |||||
return func(c *channel) { | |||||
c.chanSize = size | |||||
} | |||||
} | |||||
// WithChannelHealthCheckInterval specifies the health check interval. | |||||
// PubSub will ping Redis Server if it does not receive any messages within the interval. | |||||
// To disable health check, use zero interval. | |||||
// | |||||
// The default is 3 seconds. | |||||
func WithChannelHealthCheckInterval(d time.Duration) ChannelOption { | |||||
return func(c *channel) { | |||||
c.checkInterval = d | |||||
} | |||||
} | |||||
// WithChannelSendTimeout specifies the channel send timeout after which | |||||
// the message is dropped. | |||||
// | |||||
// The default is 60 seconds. | |||||
func WithChannelSendTimeout(d time.Duration) ChannelOption { | |||||
return func(c *channel) { | |||||
c.chanSendTimeout = d | |||||
} | |||||
} | |||||
// channel pumps messages from a PubSub into a buffered Go channel and
// runs an optional connection health check. Exactly one of msgCh/allCh
// is used, depending on which init* method was called.
type channel struct {
    pubSub *PubSub
    msgCh chan *Message     // used by Channel/ChannelSize
    allCh chan interface{}  // used by ChannelWithSubscriptions
    ping chan struct{}      // signals the health checker that traffic arrived
    chanSize int            // buffer size of msgCh/allCh
    chanSendTimeout time.Duration // how long to block before dropping a message
    checkInterval time.Duration   // ping interval; 0 disables the health check
}
func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel { | |||||
c := &channel{ | |||||
pubSub: pubSub, | |||||
chanSize: 100, | |||||
chanSendTimeout: time.Minute, | |||||
checkInterval: 3 * time.Second, | |||||
} | |||||
for _, opt := range opts { | |||||
opt(c) | |||||
} | |||||
if c.checkInterval > 0 { | |||||
c.initHealthCheck() | |||||
} | |||||
return c | |||||
} | |||||
// initHealthCheck starts a goroutine that pings the server whenever no
// message has arrived within checkInterval, reconnecting on ping
// failure. The goroutine exits when the PubSub is closed.
func (c *channel) initHealthCheck() {
    ctx := context.TODO()
    c.ping = make(chan struct{}, 1)
    go func() {
        // One reusable timer; Reset/Stop/drain per iteration.
        timer := time.NewTimer(time.Minute)
        timer.Stop()
        for {
            timer.Reset(c.checkInterval)
            select {
            case <-c.ping:
                // Traffic seen; cancel the pending ping, draining the
                // timer channel if it already fired.
                if !timer.Stop() {
                    <-timer.C
                }
            case <-timer.C:
                // No traffic within the interval: probe the connection.
                if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
                    c.pubSub.mu.Lock()
                    c.pubSub.reconnect(ctx, pingErr)
                    c.pubSub.mu.Unlock()
                }
            case <-c.pubSub.exit:
                return
            }
        }
    }()
}
// initMsgChan must be in sync with initAllChan. | |||||
// initMsgChan starts the goroutine that feeds msgCh with *Message
// values, dropping a message when the channel stays full for
// chanSendTimeout. msgCh is closed when the PubSub is closed.
func (c *channel) initMsgChan() {
    ctx := context.TODO()
    c.msgCh = make(chan *Message, c.chanSize)
    go func() {
        // One reusable timer for the send timeout.
        timer := time.NewTimer(time.Minute)
        timer.Stop()
        var errCount int
        for {
            msg, err := c.pubSub.Receive(ctx)
            if err != nil {
                if err == pool.ErrClosed {
                    close(c.msgCh)
                    return
                }
                // Back off after the first consecutive error.
                if errCount > 0 {
                    time.Sleep(100 * time.Millisecond)
                }
                errCount++
                continue
            }
            errCount = 0
            // Any message is as good as a ping.
            select {
            case c.ping <- struct{}{}:
            default:
            }
            switch msg := msg.(type) {
            case *Subscription:
                // Ignore.
            case *Pong:
                // Ignore.
            case *Message:
                timer.Reset(c.chanSendTimeout)
                select {
                case c.msgCh <- msg:
                    // Sent; cancel the timeout, draining if it fired.
                    if !timer.Stop() {
                        <-timer.C
                    }
                case <-timer.C:
                    internal.Logger.Printf(
                        ctx, "redis: %s channel is full for %s (message is dropped)",
                        c, c.chanSendTimeout)
                }
            default:
                internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
            }
        }
    }()
}
// initAllChan must be in sync with initMsgChan. | |||||
// initAllChan starts the goroutine that feeds allCh with both
// *Subscription and *Message values (Pongs are dropped), with the same
// send-timeout and close semantics as initMsgChan.
func (c *channel) initAllChan() {
    ctx := context.TODO()
    c.allCh = make(chan interface{}, c.chanSize)
    go func() {
        // One reusable timer for the send timeout.
        timer := time.NewTimer(time.Minute)
        timer.Stop()
        var errCount int
        for {
            msg, err := c.pubSub.Receive(ctx)
            if err != nil {
                if err == pool.ErrClosed {
                    close(c.allCh)
                    return
                }
                // Back off after the first consecutive error.
                if errCount > 0 {
                    time.Sleep(100 * time.Millisecond)
                }
                errCount++
                continue
            }
            errCount = 0
            // Any message is as good as a ping.
            select {
            case c.ping <- struct{}{}:
            default:
            }
            switch msg := msg.(type) {
            case *Pong:
                // Ignore.
            case *Subscription, *Message:
                timer.Reset(c.chanSendTimeout)
                select {
                case c.allCh <- msg:
                    // Sent; cancel the timeout, draining if it fired.
                    if !timer.Stop() {
                        <-timer.C
                    }
                case <-timer.C:
                    internal.Logger.Printf(
                        ctx, "redis: %s channel is full for %s (message is dropped)",
                        c, c.chanSendTimeout)
                }
            default:
                internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
            }
        }
    }()
}
@@ -0,0 +1,773 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"errors" | |||||
"fmt" | |||||
"sync/atomic" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/proto" | |||||
) | |||||
// Nil is the reply returned by Redis when a key does not exist.
const Nil = proto.Nil
// SetLogger replaces the package-wide logger used for internal
// diagnostics. Not safe to call concurrently with client use.
func SetLogger(logger internal.Logging) {
    internal.Logger = logger
}
//------------------------------------------------------------------------------ | |||||
// Hook intercepts command and pipeline execution. Before* may return a
// derived context that is passed to the command and to After*.
type Hook interface {
    BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
    AfterProcess(ctx context.Context, cmd Cmder) error
    BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
    AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
}
// hooks holds an ordered list of Hooks shared (copy-on-append after
// lock) between a client and its clones.
type hooks struct {
    hooks []Hook
}
// lock caps the slice at its current length (three-index slice) so a
// later AddHook on a clone reallocates instead of mutating the shared
// backing array.
func (hs *hooks) lock() {
    hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
}
// clone returns a copy whose hook list is safe to append to
// independently of the original.
func (hs hooks) clone() hooks {
    clone := hs
    clone.lock()
    return clone
}
// AddHook appends hook to the list; hooks run in registration order.
func (hs *hooks) AddHook(hook Hook) {
    hs.hooks = append(hs.hooks, hook)
}
// process runs cmd through the hook chain: BeforeProcess in order
// (stopping at the first error), then fn, then AfterProcess in reverse
// order for every hook whose BeforeProcess ran. The last error wins and
// is stamped on cmd.
func (hs hooks) process(
    ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
    // Fast path: no hooks registered.
    if len(hs.hooks) == 0 {
        err := fn(ctx, cmd)
        cmd.SetErr(err)
        return err
    }
    var hookIndex int
    var retErr error
    for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
        ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
        if retErr != nil {
            cmd.SetErr(retErr)
        }
    }
    if retErr == nil {
        retErr = fn(ctx, cmd)
        cmd.SetErr(retErr)
    }
    // Unwind: AfterProcess in reverse registration order.
    for hookIndex--; hookIndex >= 0; hookIndex-- {
        if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
            retErr = err
            cmd.SetErr(retErr)
        }
    }
    return retErr
}
// processPipeline is the pipeline analogue of process: it runs
// BeforeProcessPipeline in order, fn, then AfterProcessPipeline in
// reverse, stamping errors on every command via setCmdsErr.
func (hs hooks) processPipeline(
    ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
    // Fast path: no hooks registered.
    if len(hs.hooks) == 0 {
        err := fn(ctx, cmds)
        return err
    }
    var hookIndex int
    var retErr error
    for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
        ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
        if retErr != nil {
            setCmdsErr(cmds, retErr)
        }
    }
    if retErr == nil {
        retErr = fn(ctx, cmds)
    }
    // Unwind: AfterProcessPipeline in reverse registration order.
    for hookIndex--; hookIndex >= 0; hookIndex-- {
        if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
            retErr = err
            setCmdsErr(cmds, retErr)
        }
    }
    return retErr
}
// processTxPipeline wraps cmds in MULTI/EXEC and runs them through the
// pipeline hook chain.
func (hs hooks) processTxPipeline(
    ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
    cmds = wrapMultiExec(ctx, cmds)
    return hs.processPipeline(ctx, cmds, fn)
}
//------------------------------------------------------------------------------ | |||||
// baseClient implements command execution on top of a connection pool;
// Client, Conn and friends embed it.
type baseClient struct {
    opt      *Options
    connPool pool.Pooler
    onClose func() error // hook called when client is closed
}
func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient { | |||||
return &baseClient{ | |||||
opt: opt, | |||||
connPool: connPool, | |||||
} | |||||
} | |||||
// clone returns a shallow copy; opt and connPool are shared with the
// original.
func (c *baseClient) clone() *baseClient {
    clone := *c
    return &clone
}
func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { | |||||
opt := c.opt.clone() | |||||
opt.ReadTimeout = timeout | |||||
opt.WriteTimeout = timeout | |||||
clone := c.clone() | |||||
clone.opt = opt | |||||
return clone | |||||
} | |||||
// String implements fmt.Stringer, e.g. "Redis<localhost:6379 db:0>".
func (c *baseClient) String() string {
    return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}
func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { | |||||
cn, err := c.connPool.NewConn(ctx) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
err = c.initConn(ctx, cn) | |||||
if err != nil { | |||||
_ = c.connPool.CloseConn(cn) | |||||
return nil, err | |||||
} | |||||
return cn, nil | |||||
} | |||||
func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { | |||||
if c.opt.Limiter != nil { | |||||
err := c.opt.Limiter.Allow() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
} | |||||
cn, err := c._getConn(ctx) | |||||
if err != nil { | |||||
if c.opt.Limiter != nil { | |||||
c.opt.Limiter.ReportResult(err) | |||||
} | |||||
return nil, err | |||||
} | |||||
return cn, nil | |||||
} | |||||
// _getConn takes a connection from the pool and lazily runs the
// handshake on connections that have not been initialized yet.
func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
    cn, err := c.connPool.Get(ctx)
    if err != nil {
        return nil, err
    }
    if cn.Inited {
        return cn, nil
    }
    if err := c.initConn(ctx, cn); err != nil {
        c.connPool.Remove(ctx, cn, err)
        // Strip one wrapper layer (if any) so callers see the
        // underlying cause; otherwise return the error as-is.
        if err := errors.Unwrap(err); err != nil {
            return nil, err
        }
        return nil, err
    }
    return cn, nil
}
// initConn performs the connection handshake: AUTH, SELECT and READONLY
// as configured, pipelined in one round trip, then the user's OnConnect
// callback. It is a no-op for connections already marked Inited.
func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
    if cn.Inited {
        return nil
    }
    cn.Inited = true
    // Nothing to do: no auth, default DB, not read-only, no callback.
    if c.opt.Password == "" &&
        c.opt.DB == 0 &&
        !c.opt.readOnly &&
        c.opt.OnConnect == nil {
        return nil
    }
    // Wrap cn in a single-connection pool so the handshake commands are
    // guaranteed to run on this very connection.
    connPool := pool.NewSingleConnPool(c.connPool, cn)
    conn := newConn(ctx, c.opt, connPool)
    _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
        if c.opt.Password != "" {
            if c.opt.Username != "" {
                pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
            } else {
                pipe.Auth(ctx, c.opt.Password)
            }
        }
        if c.opt.DB > 0 {
            pipe.Select(ctx, c.opt.DB)
        }
        if c.opt.readOnly {
            pipe.ReadOnly(ctx)
        }
        return nil
    })
    if err != nil {
        return err
    }
    if c.opt.OnConnect != nil {
        return c.opt.OnConnect(ctx, conn)
    }
    return nil
}
func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { | |||||
if c.opt.Limiter != nil { | |||||
c.opt.Limiter.ReportResult(err) | |||||
} | |||||
if isBadConn(err, false, c.opt.Addr) { | |||||
c.connPool.Remove(ctx, cn, err) | |||||
} else { | |||||
c.connPool.Put(ctx, cn) | |||||
} | |||||
} | |||||
// withConn runs fn on a pooled connection and releases it afterwards.
// When ctx is cancellable, fn runs in a goroutine so that cancellation
// can close the connection and unblock fn.
func (c *baseClient) withConn(
    ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
    cn, err := c.getConn(ctx)
    if err != nil {
        return err
    }
    defer func() {
        c.releaseConn(ctx, cn, err)
    }()
    done := ctx.Done() //nolint:ifshort
    // Fast path: ctx can never be cancelled, run fn inline.
    if done == nil {
        err = fn(ctx, cn)
        return err
    }
    errc := make(chan error, 1)
    go func() { errc <- fn(ctx, cn) }()
    select {
    case <-done:
        // Close the connection to abort any blocked read/write in fn.
        _ = cn.Close()
        // Wait for the goroutine to finish and send something.
        <-errc
        err = ctx.Err()
        return err
    case err = <-errc:
        return err
    }
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error { | |||||
var lastErr error | |||||
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { | |||||
attempt := attempt | |||||
retry, err := c._process(ctx, cmd, attempt) | |||||
if err == nil || !retry { | |||||
return err | |||||
} | |||||
lastErr = err | |||||
} | |||||
return lastErr | |||||
} | |||||
func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { | |||||
if attempt > 0 { | |||||
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { | |||||
return false, err | |||||
} | |||||
} | |||||
retryTimeout := uint32(1) | |||||
err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { | |||||
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||||
return writeCmd(wr, cmd) | |||||
}) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply) | |||||
if err != nil { | |||||
if cmd.readTimeout() == nil { | |||||
atomic.StoreUint32(&retryTimeout, 1) | |||||
} | |||||
return err | |||||
} | |||||
return nil | |||||
}) | |||||
if err == nil { | |||||
return false, nil | |||||
} | |||||
retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) | |||||
return retry, err | |||||
} | |||||
// retryBackoff computes the sleep before the given retry attempt,
// bounded by MinRetryBackoff and MaxRetryBackoff.
func (c *baseClient) retryBackoff(attempt int) time.Duration {
    return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { | |||||
if timeout := cmd.readTimeout(); timeout != nil { | |||||
t := *timeout | |||||
if t == 0 { | |||||
return 0 | |||||
} | |||||
return t + 10*time.Second | |||||
} | |||||
return c.opt.ReadTimeout | |||||
} | |||||
// Close closes the client, releasing any open resources. | |||||
// | |||||
// It is rare to Close a Client, as the Client is meant to be | |||||
// long-lived and shared between many goroutines. | |||||
func (c *baseClient) Close() error { | |||||
var firstErr error | |||||
if c.onClose != nil { | |||||
if err := c.onClose(); err != nil { | |||||
firstErr = err | |||||
} | |||||
} | |||||
if err := c.connPool.Close(); err != nil && firstErr == nil { | |||||
firstErr = err | |||||
} | |||||
return firstErr | |||||
} | |||||
// getAddr returns the configured server address.
func (c *baseClient) getAddr() string {
    return c.opt.Addr
}
// processPipeline executes cmds as a plain (non-transactional) pipeline.
func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
    return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
}
// processTxPipeline executes cmds (already wrapped in MULTI/EXEC) as a
// transactional pipeline.
func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
    return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
}
type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) | |||||
func (c *baseClient) generalProcessPipeline( | |||||
ctx context.Context, cmds []Cmder, p pipelineProcessor, | |||||
) error { | |||||
err := c._generalProcessPipeline(ctx, cmds, p) | |||||
if err != nil { | |||||
setCmdsErr(cmds, err) | |||||
return err | |||||
} | |||||
return cmdsFirstErr(cmds) | |||||
} | |||||
// _generalProcessPipeline is the retry loop for pipelines: up to
// MaxRetries+1 attempts, sleeping a backoff before each retry, stopping
// when p succeeds or reports the error as non-retryable.
func (c *baseClient) _generalProcessPipeline(
    ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
    var lastErr error
    for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
        if attempt > 0 {
            if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
                return err
            }
        }
        var canRetry bool
        lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
            var err error
            canRetry, err = p(ctx, cn, cmds)
            return err
        })
        if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
            return lastErr
        }
    }
    return lastErr
}
func (c *baseClient) pipelineProcessCmds( | |||||
ctx context.Context, cn *pool.Conn, cmds []Cmder, | |||||
) (bool, error) { | |||||
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||||
return writeCmds(wr, cmds) | |||||
}) | |||||
if err != nil { | |||||
return true, err | |||||
} | |||||
err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { | |||||
return pipelineReadCmds(rd, cmds) | |||||
}) | |||||
return true, err | |||||
} | |||||
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { | |||||
for _, cmd := range cmds { | |||||
err := cmd.readReply(rd) | |||||
cmd.SetErr(err) | |||||
if err != nil && !isRedisError(err) { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} | |||||
// txPipelineProcessCmds writes the MULTI/.../EXEC batch and reads the
// queued acknowledgements followed by the EXEC replies. Transactions
// are never retried (first return value is false).
func (c *baseClient) txPipelineProcessCmds(
    ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
    err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
        return writeCmds(wr, cmds)
    })
    if err != nil {
        return true, err
    }
    err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
        // cmds[0] is the MULTI wrapper added by wrapMultiExec.
        statusCmd := cmds[0].(*StatusCmd)
        // Trim multi and exec.
        cmds = cmds[1 : len(cmds)-1]
        err := txPipelineReadQueued(rd, statusCmd, cmds)
        if err != nil {
            return err
        }
        // The EXEC reply contains the real results, one per command.
        return pipelineReadCmds(rd, cmds)
    })
    return false, err
}
func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder { | |||||
if len(cmds) == 0 { | |||||
panic("not reached") | |||||
} | |||||
cmdCopy := make([]Cmder, len(cmds)+2) | |||||
cmdCopy[0] = NewStatusCmd(ctx, "multi") | |||||
copy(cmdCopy[1:], cmds) | |||||
cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec") | |||||
return cmdCopy | |||||
} | |||||
// txPipelineReadQueued consumes the MULTI acknowledgement, one QUEUED
// reply per command, and the EXEC array header. A Nil reply for EXEC
// means the transaction was aborted (TxFailedErr).
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
    // Parse queued replies.
    if err := statusCmd.readReply(rd); err != nil {
        return err
    }
    for range cmds {
        // Per-command Redis errors here (e.g. bad command) are reported
        // again by EXEC, so only transport errors abort.
        if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
            return err
        }
    }
    // Parse number of replies.
    line, err := rd.ReadLine()
    if err != nil {
        if err == Nil {
            err = TxFailedErr
        }
        return err
    }
    switch line[0] {
    case proto.ErrorReply:
        return proto.ParseErrorReply(line)
    case proto.ArrayReply:
        // ok
    default:
        err := fmt.Errorf("redis: expected '*', but got line %q", line)
        return err
    }
    return nil
}
//------------------------------------------------------------------------------ | |||||
// Client is a Redis client representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
type Client struct {
    *baseClient
    cmdable
    hooks
    // ctx is the default context used by Pipeline/TxPipeline;
    // see WithContext.
    ctx context.Context
}
// NewClient returns a client to the Redis Server specified by Options.
// opt is normalized in place via opt.init().
func NewClient(opt *Options) *Client {
    opt.init()
    c := Client{
        baseClient: newBaseClient(opt, newConnPool(opt)),
        ctx:        context.Background(),
    }
    // cmdable dispatches every generated command through Process.
    c.cmdable = c.Process
    return &c
}
// clone returns a shallow copy whose cmdable dispatches through the
// copy's Process and whose hook list is copy-on-append.
func (c *Client) clone() *Client {
    clone := *c
    clone.cmdable = clone.Process
    clone.hooks.lock()
    return &clone
}
// WithTimeout returns a copy of the client using timeout for both read
// and write operations.
func (c *Client) WithTimeout(timeout time.Duration) *Client {
    clone := c.clone()
    clone.baseClient = c.baseClient.withTimeout(timeout)
    return clone
}
// Context returns the client's default context (set via WithContext).
func (c *Client) Context() context.Context {
    return c.ctx
}
// WithContext returns a shallow copy of the client bound to ctx.
// It panics when ctx is nil.
func (c *Client) WithContext(ctx context.Context) *Client {
    if ctx == nil {
        panic("nil context")
    }
    clone := c.clone()
    clone.ctx = ctx
    return clone
}
// Conn returns a Conn bound to a single pooled connection (sticky pool).
func (c *Client) Conn(ctx context.Context) *Conn {
    return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
}
// Do creates a Cmd from the args and processes the cmd.
// Any execution error is recorded on the returned Cmd.
func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
    cmd := NewCmd(ctx, args...)
    _ = c.Process(ctx, cmd)
    return cmd
}
// Process executes cmd through the registered hooks.
func (c *Client) Process(ctx context.Context, cmd Cmder) error {
    return c.hooks.process(ctx, cmd, c.baseClient.process)
}
// processPipeline executes a plain pipeline through the registered hooks.
func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
    return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}
// processTxPipeline executes a MULTI/EXEC pipeline through the registered hooks.
func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
    return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
// Options returns read-only Options that were used to create the client.
// Callers must not mutate the returned value.
func (c *Client) Options() *Options {
    return c.opt
}
type PoolStats pool.Stats | |||||
// PoolStats returns connection pool stats. | |||||
func (c *Client) PoolStats() *PoolStats { | |||||
stats := c.connPool.Stats() | |||||
return (*PoolStats)(stats) | |||||
} | |||||
// Pipelined runs fn against a fresh pipeline and executes it.
func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
    return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Client) Pipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} | |||||
// TxPipelined runs fn against a fresh transactional pipeline and executes it.
func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
    return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. | |||||
func (c *Client) TxPipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processTxPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} | |||||
func (c *Client) pubSub() *PubSub { | |||||
pubsub := &PubSub{ | |||||
opt: c.opt, | |||||
newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { | |||||
return c.newConn(ctx) | |||||
}, | |||||
closeConn: c.connPool.CloseConn, | |||||
} | |||||
pubsub.init() | |||||
return pubsub | |||||
} | |||||
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
//    sub := client.Subscribe(queryResp)
//    iface, err := sub.Receive()
//    if err != nil {
//        // handle error
//    }
//
//    // Should be *Subscription, but others are possible if other actions have been
//    // taken on sub since it was created.
//    switch iface.(type) {
//    case *Subscription:
//        // subscribe succeeded
//    case *Message:
//        // received first message
//    case *Pong:
//        // pong received
//    default:
//        // handle error
//    }
//
//    ch := sub.Channel()
func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
    pubsub := c.pubSub()
    if len(channels) > 0 {
        _ = pubsub.Subscribe(ctx, channels...)
    }
    return pubsub
}
// PSubscribe subscribes the client to the given patterns. | |||||
// Patterns can be omitted to create empty subscription. | |||||
func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { | |||||
pubsub := c.pubSub() | |||||
if len(channels) > 0 { | |||||
_ = pubsub.PSubscribe(ctx, channels...) | |||||
} | |||||
return pubsub | |||||
} | |||||
//------------------------------------------------------------------------------ | |||||
// conn bundles the embedded command sets backing a single dedicated
// connection; it is the unexported core of the exported Conn type below.
type conn struct {
	baseClient
	cmdable
	statefulCmdable
	hooks // TODO: inherit hooks
}
// Conn represents a single Redis connection rather than a pool of connections.
// Prefer running commands from Client unless there is a specific need
// for a continuous single Redis connection.
type Conn struct {
	*conn
	ctx context.Context // default context used by Pipeline/TxPipeline
}
func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn { | |||||
c := Conn{ | |||||
conn: &conn{ | |||||
baseClient: baseClient{ | |||||
opt: opt, | |||||
connPool: connPool, | |||||
}, | |||||
}, | |||||
ctx: ctx, | |||||
} | |||||
c.cmdable = c.Process | |||||
c.statefulCmdable = c.Process | |||||
return &c | |||||
} | |||||
// Process runs a single command through the hook chain and then the
// underlying baseClient.
func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
// processPipeline runs a batch of commands through the pipeline hooks.
func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}
// processTxPipeline is like processPipeline but for MULTI/EXEC batches.
func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
return c.Pipeline().Pipelined(ctx, fn) | |||||
} | |||||
func (c *Conn) Pipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} | |||||
func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
return c.TxPipeline().Pipelined(ctx, fn) | |||||
} | |||||
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. | |||||
func (c *Conn) TxPipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processTxPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} |
@@ -0,0 +1,180 @@ | |||||
package redis | |||||
import "time" | |||||
// NewCmdResult returns a Cmd initialised with val and err for testing. | |||||
func NewCmdResult(val interface{}, err error) *Cmd { | |||||
var cmd Cmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewSliceResult returns a SliceCmd initialised with val and err for testing. | |||||
func NewSliceResult(val []interface{}, err error) *SliceCmd { | |||||
var cmd SliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewStatusResult returns a StatusCmd initialised with val and err for testing. | |||||
func NewStatusResult(val string, err error) *StatusCmd { | |||||
var cmd StatusCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewIntResult returns an IntCmd initialised with val and err for testing. | |||||
func NewIntResult(val int64, err error) *IntCmd { | |||||
var cmd IntCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewDurationResult returns a DurationCmd initialised with val and err for testing. | |||||
func NewDurationResult(val time.Duration, err error) *DurationCmd { | |||||
var cmd DurationCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewBoolResult returns a BoolCmd initialised with val and err for testing. | |||||
func NewBoolResult(val bool, err error) *BoolCmd { | |||||
var cmd BoolCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewStringResult returns a StringCmd initialised with val and err for testing. | |||||
func NewStringResult(val string, err error) *StringCmd { | |||||
var cmd StringCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewFloatResult returns a FloatCmd initialised with val and err for testing. | |||||
func NewFloatResult(val float64, err error) *FloatCmd { | |||||
var cmd FloatCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing. | |||||
func NewStringSliceResult(val []string, err error) *StringSliceCmd { | |||||
var cmd StringSliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing. | |||||
func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { | |||||
var cmd BoolSliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing. | |||||
func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { | |||||
var cmd StringStringMapCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing. | |||||
func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { | |||||
var cmd StringIntMapCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing. | |||||
func NewTimeCmdResult(val time.Time, err error) *TimeCmd { | |||||
var cmd TimeCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing. | |||||
func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { | |||||
var cmd ZSliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing. | |||||
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd { | |||||
var cmd ZWithKeyCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewScanCmdResult returns a ScanCmd initialised with val and err for testing. | |||||
func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { | |||||
var cmd ScanCmd | |||||
cmd.page = keys | |||||
cmd.cursor = cursor | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing. | |||||
func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { | |||||
var cmd ClusterSlotsCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing. | |||||
func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { | |||||
var cmd GeoLocationCmd | |||||
cmd.locations = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing. | |||||
func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd { | |||||
var cmd GeoPosCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing. | |||||
func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { | |||||
var cmd CommandsInfoCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing. | |||||
func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd { | |||||
var cmd XMessageSliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} | |||||
// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing. | |||||
func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd { | |||||
var cmd XStreamSliceCmd | |||||
cmd.val = val | |||||
cmd.SetErr(err) | |||||
return &cmd | |||||
} |
@@ -0,0 +1,736 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"crypto/tls" | |||||
"errors" | |||||
"fmt" | |||||
"net" | |||||
"strconv" | |||||
"sync" | |||||
"sync/atomic" | |||||
"time" | |||||
"github.com/cespare/xxhash/v2" | |||||
rendezvous "github.com/dgryski/go-rendezvous" //nolint | |||||
"github.com/go-redis/redis/v8/internal" | |||||
"github.com/go-redis/redis/v8/internal/hashtag" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/rand" | |||||
) | |||||
// errRingShardsDown is returned when no live shard is available to serve a key.
var errRingShardsDown = errors.New("redis: all ring shards are down")
//------------------------------------------------------------------------------
// ConsistentHash maps a key to the name of the shard that owns it.
type ConsistentHash interface {
	Get(string) string
}
// rendezvousWrapper adapts *rendezvous.Rendezvous to ConsistentHash.
type rendezvousWrapper struct {
	*rendezvous.Rendezvous
}
// Get returns the shard name owning key (rendezvous/HRW lookup).
func (w rendezvousWrapper) Get(key string) string {
	return w.Lookup(key)
}
// newRendezvous is the default NewConsistentHash: rendezvous hashing over
// the shard names using xxhash.
func newRendezvous(shards []string) ConsistentHash {
	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
}
//------------------------------------------------------------------------------ | |||||
// RingOptions are used to configure a ring client and should be
// passed to NewRing.
//
// Zero values for HeartbeatFrequency, MaxRetries and the retry backoffs are
// replaced with defaults by init(); a value of -1 disables the option.
type RingOptions struct {
	// Map of name => host:port addresses of ring shards.
	Addrs map[string]string
	// NewClient creates a shard client with provided name and options.
	NewClient func(name string, opt *Options) *Client
	// Frequency of PING commands sent to check shards availability.
	// Shard is considered down after 3 subsequent failed checks.
	HeartbeatFrequency time.Duration
	// NewConsistentHash returns a consistent hash that is used
	// to distribute keys across the shards.
	//
	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
	// for consistent hashing algorithmic tradeoffs.
	NewConsistentHash func(shards []string) ConsistentHash
	// Following options are copied from Options struct.
	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error
	Username string
	Password string
	DB       int
	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration
	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration
	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool
	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
	TLSConfig *tls.Config
	// Limiter, if set, is copied verbatim to every shard client.
	Limiter Limiter
}
func (opt *RingOptions) init() { | |||||
if opt.NewClient == nil { | |||||
opt.NewClient = func(name string, opt *Options) *Client { | |||||
return NewClient(opt) | |||||
} | |||||
} | |||||
if opt.HeartbeatFrequency == 0 { | |||||
opt.HeartbeatFrequency = 500 * time.Millisecond | |||||
} | |||||
if opt.NewConsistentHash == nil { | |||||
opt.NewConsistentHash = newRendezvous | |||||
} | |||||
if opt.MaxRetries == -1 { | |||||
opt.MaxRetries = 0 | |||||
} else if opt.MaxRetries == 0 { | |||||
opt.MaxRetries = 3 | |||||
} | |||||
switch opt.MinRetryBackoff { | |||||
case -1: | |||||
opt.MinRetryBackoff = 0 | |||||
case 0: | |||||
opt.MinRetryBackoff = 8 * time.Millisecond | |||||
} | |||||
switch opt.MaxRetryBackoff { | |||||
case -1: | |||||
opt.MaxRetryBackoff = 0 | |||||
case 0: | |||||
opt.MaxRetryBackoff = 512 * time.Millisecond | |||||
} | |||||
} | |||||
// clientOptions translates the ring options into per-shard client Options.
func (opt *RingOptions) clientOptions() *Options {
	return &Options{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,
		Username: opt.Username,
		Password: opt.Password,
		DB:       opt.DB,
		MaxRetries: -1, // disable retries in the shard client; Ring.process retries itself
		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,
		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		TLSConfig: opt.TLSConfig,
		Limiter:   opt.Limiter,
	}
}
//------------------------------------------------------------------------------ | |||||
// ringShard pairs a shard Client with the failed-heartbeat counter that
// Vote/IsDown use to track availability.
type ringShard struct {
	Client *Client
	down   int32
}
// newRingShard builds the shard client for addr via opt.NewClient.
func newRingShard(opt *RingOptions, name, addr string) *ringShard {
	clopt := opt.clientOptions()
	clopt.Addr = addr
	return &ringShard{
		Client: opt.NewClient(name, clopt),
	}
}
func (shard *ringShard) String() string { | |||||
var state string | |||||
if shard.IsUp() { | |||||
state = "up" | |||||
} else { | |||||
state = "down" | |||||
} | |||||
return fmt.Sprintf("%s is %s", shard.Client, state) | |||||
} | |||||
func (shard *ringShard) IsDown() bool { | |||||
const threshold = 3 | |||||
return atomic.LoadInt32(&shard.down) >= threshold | |||||
} | |||||
func (shard *ringShard) IsUp() bool { | |||||
return !shard.IsDown() | |||||
} | |||||
// Vote votes to set shard state and returns true if state was changed. | |||||
func (shard *ringShard) Vote(up bool) bool { | |||||
if up { | |||||
changed := shard.IsDown() | |||||
atomic.StoreInt32(&shard.down, 0) | |||||
return changed | |||||
} | |||||
if shard.IsDown() { | |||||
return false | |||||
} | |||||
atomic.AddInt32(&shard.down, 1) | |||||
return shard.IsDown() | |||||
} | |||||
//------------------------------------------------------------------------------ | |||||
// ringShards tracks the shard set, the consistent hash over the currently
// live shards, and the closed flag. mu guards hash, shards, list, numShard
// and closed.
type ringShards struct {
	opt *RingOptions
	mu       sync.RWMutex
	hash     ConsistentHash
	shards   map[string]*ringShard // read only
	list     []*ringShard          // read only
	numShard int // number of live shards as of the last rebalance
	closed   bool
}
func newRingShards(opt *RingOptions) *ringShards { | |||||
shards := make(map[string]*ringShard, len(opt.Addrs)) | |||||
list := make([]*ringShard, 0, len(shards)) | |||||
for name, addr := range opt.Addrs { | |||||
shard := newRingShard(opt, name, addr) | |||||
shards[name] = shard | |||||
list = append(list, shard) | |||||
} | |||||
c := &ringShards{ | |||||
opt: opt, | |||||
shards: shards, | |||||
list: list, | |||||
} | |||||
c.rebalance() | |||||
return c | |||||
} | |||||
func (c *ringShards) List() []*ringShard { | |||||
var list []*ringShard | |||||
c.mu.RLock() | |||||
if !c.closed { | |||||
list = c.list | |||||
} | |||||
c.mu.RUnlock() | |||||
return list | |||||
} | |||||
func (c *ringShards) Hash(key string) string { | |||||
key = hashtag.Key(key) | |||||
var hash string | |||||
c.mu.RLock() | |||||
if c.numShard > 0 { | |||||
hash = c.hash.Get(key) | |||||
} | |||||
c.mu.RUnlock() | |||||
return hash | |||||
} | |||||
func (c *ringShards) GetByKey(key string) (*ringShard, error) { | |||||
key = hashtag.Key(key) | |||||
c.mu.RLock() | |||||
if c.closed { | |||||
c.mu.RUnlock() | |||||
return nil, pool.ErrClosed | |||||
} | |||||
if c.numShard == 0 { | |||||
c.mu.RUnlock() | |||||
return nil, errRingShardsDown | |||||
} | |||||
hash := c.hash.Get(key) | |||||
if hash == "" { | |||||
c.mu.RUnlock() | |||||
return nil, errRingShardsDown | |||||
} | |||||
shard := c.shards[hash] | |||||
c.mu.RUnlock() | |||||
return shard, nil | |||||
} | |||||
func (c *ringShards) GetByName(shardName string) (*ringShard, error) { | |||||
if shardName == "" { | |||||
return c.Random() | |||||
} | |||||
c.mu.RLock() | |||||
shard := c.shards[shardName] | |||||
c.mu.RUnlock() | |||||
return shard, nil | |||||
} | |||||
func (c *ringShards) Random() (*ringShard, error) { | |||||
return c.GetByKey(strconv.Itoa(rand.Int())) | |||||
} | |||||
// Heartbeat monitors the state of each shard in the ring: every frequency it
// PINGs all shards, votes on each shard's up/down state, and rebalances the
// consistent hash when any shard changed state.
//
// NOTE(review): this loop has no shutdown signal — it keeps running (and
// pinging closed shard clients) even after ringShards.Close. Confirm whether
// a stop channel should be added.
func (c *ringShards) Heartbeat(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()
	ctx := context.Background()
	for range ticker.C {
		var rebalance bool
		for _, shard := range c.List() {
			err := shard.Client.Ping(ctx).Err()
			// A pool timeout does not count as the shard being down.
			isUp := err == nil || err == pool.ErrPoolTimeout
			if shard.Vote(isUp) {
				internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
				rebalance = true
			}
		}
		if rebalance {
			c.rebalance()
		}
	}
}
// rebalance removes dead shards from the Ring by rebuilding the consistent
// hash from the shards that are currently up. The shard map is read under
// RLock, the new hash is built outside the lock, and the swap happens under
// the write lock.
func (c *ringShards) rebalance() {
	c.mu.RLock()
	shards := c.shards
	c.mu.RUnlock()
	liveShards := make([]string, 0, len(shards))
	for name, shard := range shards {
		if shard.IsUp() {
			liveShards = append(liveShards, name)
		}
	}
	hash := c.opt.NewConsistentHash(liveShards)
	c.mu.Lock()
	c.hash = hash
	c.numShard = len(liveShards)
	c.mu.Unlock()
}
// Len returns the number of live shards as of the last rebalance.
func (c *ringShards) Len() int {
	c.mu.RLock()
	l := c.numShard
	c.mu.RUnlock()
	return l
}
// Close closes every shard client, marks the ring shards as closed and
// drops the internal state. It returns the first close error encountered;
// subsequent calls are no-ops.
func (c *ringShards) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil
	}
	c.closed = true
	var firstErr error
	for _, shard := range c.shards {
		if err := shard.Client.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	// Release references so the shard clients can be collected.
	c.hash = nil
	c.shards = nil
	c.list = nil
	return firstErr
}
//------------------------------------------------------------------------------ | |||||
// ring holds the state shared between a Ring and its WithContext clones.
type ring struct {
	opt           *RingOptions
	shards        *ringShards
	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
}
// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
//
// Ring monitors the state of each shard and removes dead shards from
// the ring. When a shard comes online it is added back to the ring. This
// gives you maximum availability and partition tolerance, but no
// consistency between different shards or even clients. Each client
// uses shards that are available to the client and does not do any
// coordination when shard state is changed.
//
// Ring should be used when you need multiple Redis servers for caching
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
	*ring
	cmdable
	hooks
	ctx context.Context // default context used by Pipeline/TxPipeline
}
// NewRing returns a Ring configured from opt and starts the background
// heartbeat goroutine that monitors shard availability.
//
// NOTE(review): the Heartbeat goroutine has no stop signal, so it outlives
// Close — confirm this is acceptable.
func NewRing(opt *RingOptions) *Ring {
	opt.init()
	ring := Ring{
		ring: &ring{
			opt:    opt,
			shards: newRingShards(opt),
		},
		ctx: context.Background(),
	}
	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
	ring.cmdable = ring.Process
	go ring.shards.Heartbeat(opt.HeartbeatFrequency)
	return &ring
}
func (c *Ring) Context() context.Context { | |||||
return c.ctx | |||||
} | |||||
func (c *Ring) WithContext(ctx context.Context) *Ring { | |||||
if ctx == nil { | |||||
panic("nil context") | |||||
} | |||||
clone := *c | |||||
clone.cmdable = clone.Process | |||||
clone.hooks.lock() | |||||
clone.ctx = ctx | |||||
return &clone | |||||
} | |||||
// Do creates a Cmd from the args and processes the cmd.
// Any processing error is also recorded on the returned Cmd.
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Process runs cmd through the hook chain and then the ring's shard routing.
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.process)
}
// Options returns read-only Options that were used to create the client.
func (c *Ring) Options() *RingOptions {
	return c.opt
}
// retryBackoff computes the sleep duration before retry number attempt.
func (c *Ring) retryBackoff(attempt int) time.Duration {
	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
// PoolStats returns accumulated connection pool stats. | |||||
func (c *Ring) PoolStats() *PoolStats { | |||||
shards := c.shards.List() | |||||
var acc PoolStats | |||||
for _, shard := range shards { | |||||
s := shard.Client.connPool.Stats() | |||||
acc.Hits += s.Hits | |||||
acc.Misses += s.Misses | |||||
acc.Timeouts += s.Timeouts | |||||
acc.TotalConns += s.TotalConns | |||||
acc.IdleConns += s.IdleConns | |||||
} | |||||
return &acc | |||||
} | |||||
// Len returns the current number of shards in the ring. | |||||
func (c *Ring) Len() int { | |||||
return c.shards.Len() | |||||
} | |||||
// Subscribe subscribes the client to the specified channels. | |||||
func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub { | |||||
if len(channels) == 0 { | |||||
panic("at least one channel is required") | |||||
} | |||||
shard, err := c.shards.GetByKey(channels[0]) | |||||
if err != nil { | |||||
// TODO: return PubSub with sticky error | |||||
panic(err) | |||||
} | |||||
return shard.Client.Subscribe(ctx, channels...) | |||||
} | |||||
// PSubscribe subscribes the client to the given patterns. | |||||
func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub { | |||||
if len(channels) == 0 { | |||||
panic("at least one channel is required") | |||||
} | |||||
shard, err := c.shards.GetByKey(channels[0]) | |||||
if err != nil { | |||||
// TODO: return PubSub with sticky error | |||||
panic(err) | |||||
} | |||||
return shard.Client.PSubscribe(ctx, channels...) | |||||
} | |||||
// ForEachShard concurrently calls the fn on each live shard in the ring. | |||||
// It returns the first error if any. | |||||
func (c *Ring) ForEachShard( | |||||
ctx context.Context, | |||||
fn func(ctx context.Context, client *Client) error, | |||||
) error { | |||||
shards := c.shards.List() | |||||
var wg sync.WaitGroup | |||||
errCh := make(chan error, 1) | |||||
for _, shard := range shards { | |||||
if shard.IsDown() { | |||||
continue | |||||
} | |||||
wg.Add(1) | |||||
go func(shard *ringShard) { | |||||
defer wg.Done() | |||||
err := fn(ctx, shard.Client) | |||||
if err != nil { | |||||
select { | |||||
case errCh <- err: | |||||
default: | |||||
} | |||||
} | |||||
}(shard) | |||||
} | |||||
wg.Wait() | |||||
select { | |||||
case err := <-errCh: | |||||
return err | |||||
default: | |||||
return nil | |||||
} | |||||
} | |||||
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { | |||||
shards := c.shards.List() | |||||
var firstErr error | |||||
for _, shard := range shards { | |||||
cmdsInfo, err := shard.Client.Command(ctx).Result() | |||||
if err == nil { | |||||
return cmdsInfo, nil | |||||
} | |||||
if firstErr == nil { | |||||
firstErr = err | |||||
} | |||||
} | |||||
if firstErr == nil { | |||||
return nil, errRingShardsDown | |||||
} | |||||
return nil, firstErr | |||||
} | |||||
func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo { | |||||
cmdsInfo, err := c.cmdsInfoCache.Get(ctx) | |||||
if err != nil { | |||||
return nil | |||||
} | |||||
info := cmdsInfo[name] | |||||
if info == nil { | |||||
internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) | |||||
} | |||||
return info | |||||
} | |||||
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { | |||||
cmdInfo := c.cmdInfo(ctx, cmd.Name()) | |||||
pos := cmdFirstKeyPos(cmd, cmdInfo) | |||||
if pos == 0 { | |||||
return c.shards.Random() | |||||
} | |||||
firstKey := cmd.stringArg(pos) | |||||
return c.shards.GetByKey(firstKey) | |||||
} | |||||
// process routes cmd to the shard owning its first key and executes it,
// retrying up to opt.MaxRetries times with backoff. The shard is re-resolved
// on every attempt, so a rebalance between attempts picks a new target.
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		if attempt > 0 {
			// Sleep is ctx-aware: abort retries when the context is cancelled.
			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
				return err
			}
		}
		shard, err := c.cmdShard(ctx, cmd)
		if err != nil {
			return err
		}
		lastErr = shard.Client.Process(ctx, cmd)
		// Stop on success or on errors that are not retryable; commands with
		// a read timeout override are not retried on timeout.
		if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
			return lastErr
		}
	}
	return lastErr
}
func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
return c.Pipeline().Pipelined(ctx, fn) | |||||
} | |||||
func (c *Ring) Pipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} | |||||
func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error { | |||||
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { | |||||
return c.generalProcessPipeline(ctx, cmds, false) | |||||
}) | |||||
} | |||||
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { | |||||
return c.TxPipeline().Pipelined(ctx, fn) | |||||
} | |||||
func (c *Ring) TxPipeline() Pipeliner { | |||||
pipe := Pipeline{ | |||||
ctx: c.ctx, | |||||
exec: c.processTxPipeline, | |||||
} | |||||
pipe.init() | |||||
return &pipe | |||||
} | |||||
func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error { | |||||
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { | |||||
return c.generalProcessPipeline(ctx, cmds, true) | |||||
}) | |||||
} | |||||
// generalProcessPipeline groups cmds by owning shard and executes each group
// concurrently, either as a plain pipeline or (when tx) wrapped in
// MULTI/EXEC. Keyless commands hash to "" and are routed to a random shard
// by processShardPipeline. It returns the first command error, if any.
func (c *Ring) generalProcessPipeline(
	ctx context.Context, cmds []Cmder, tx bool,
) error {
	cmdsMap := make(map[string][]Cmder)
	for _, cmd := range cmds {
		cmdInfo := c.cmdInfo(ctx, cmd.Name())
		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
		if hash != "" {
			hash = c.shards.Hash(hash)
		}
		cmdsMap[hash] = append(cmdsMap[hash], cmd)
	}
	var wg sync.WaitGroup
	for hash, cmds := range cmdsMap {
		wg.Add(1)
		// Pass hash/cmds as arguments so each goroutine gets its own copy.
		go func(hash string, cmds []Cmder) {
			defer wg.Done()
			// Errors are recorded on the individual commands, so the return
			// value can be ignored here.
			_ = c.processShardPipeline(ctx, hash, cmds, tx)
		}(hash, cmds)
	}
	wg.Wait()
	return cmdsFirstErr(cmds)
}
func (c *Ring) processShardPipeline( | |||||
ctx context.Context, hash string, cmds []Cmder, tx bool, | |||||
) error { | |||||
// TODO: retry? | |||||
shard, err := c.shards.GetByName(hash) | |||||
if err != nil { | |||||
setCmdsErr(cmds, err) | |||||
return err | |||||
} | |||||
if tx { | |||||
return shard.Client.processTxPipeline(ctx, cmds) | |||||
} | |||||
return shard.Client.processPipeline(ctx, cmds) | |||||
} | |||||
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { | |||||
if len(keys) == 0 { | |||||
return fmt.Errorf("redis: Watch requires at least one key") | |||||
} | |||||
var shards []*ringShard | |||||
for _, key := range keys { | |||||
if key != "" { | |||||
shard, err := c.shards.GetByKey(hashtag.Key(key)) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
shards = append(shards, shard) | |||||
} | |||||
} | |||||
if len(shards) == 0 { | |||||
return fmt.Errorf("redis: Watch requires at least one shard") | |||||
} | |||||
if len(shards) > 1 { | |||||
for _, shard := range shards[1:] { | |||||
if shard.Client != shards[0].Client { | |||||
err := fmt.Errorf("redis: Watch requires all keys to be in the same shard") | |||||
return err | |||||
} | |||||
} | |||||
} | |||||
return shards[0].Client.Watch(ctx, fn, keys...) | |||||
} | |||||
// Close closes the ring client, releasing any open resources.
//
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
//
// NOTE(review): only the shard clients are closed; the heartbeat goroutine
// started in NewRing keeps running — confirm this is intended.
func (c *Ring) Close() error {
	return c.shards.Close()
}
@@ -0,0 +1,65 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"crypto/sha1" | |||||
"encoding/hex" | |||||
"io" | |||||
"strings" | |||||
) | |||||
// Scripter is the subset of the client API needed to load and run Lua
// scripts (EVAL / EVALSHA / SCRIPT EXISTS / SCRIPT LOAD).
type Scripter interface {
	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
	ScriptLoad(ctx context.Context, script string) *StringCmd
}
// Compile-time checks that the concrete clients implement Scripter.
var (
	_ Scripter = (*Client)(nil)
	_ Scripter = (*Ring)(nil)
	_ Scripter = (*ClusterClient)(nil)
)
// Script pairs a Lua script with its SHA1 digest so it can be run via
// EVALSHA with an EVAL fallback (see Run).
type Script struct {
	src, hash string
}
func NewScript(src string) *Script { | |||||
h := sha1.New() | |||||
_, _ = io.WriteString(h, src) | |||||
return &Script{ | |||||
src: src, | |||||
hash: hex.EncodeToString(h.Sum(nil)), | |||||
} | |||||
} | |||||
func (s *Script) Hash() string { | |||||
return s.hash | |||||
} | |||||
// Load caches the script source on the server via SCRIPT LOAD.
func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
	return c.ScriptLoad(ctx, s.src)
}
// Exists reports (via SCRIPT EXISTS) whether the script is cached server-side.
func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
	return c.ScriptExists(ctx, s.hash)
}
// Eval runs the script by sending its full source (EVAL).
func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.Eval(ctx, s.src, keys, args...)
}
// EvalSha runs the script by its cached digest (EVALSHA).
func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.EvalSha(ctx, s.hash, keys, args...)
}
// Run optimistically uses EVALSHA to run the script. If script does not exist | |||||
// it is retried using EVAL. | |||||
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { | |||||
r := s.EvalSha(ctx, c, keys, args...) | |||||
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { | |||||
return s.Eval(ctx, c, keys, args...) | |||||
} | |||||
return r | |||||
} |
@@ -0,0 +1,796 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"crypto/tls" | |||||
"errors" | |||||
"net" | |||||
"strings" | |||||
"sync" | |||||
"time" | |||||
"github.com/go-redis/redis/v8/internal" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/rand" | |||||
) | |||||
//------------------------------------------------------------------------------ | |||||
// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
	// The master name.
	MasterName string
	// A seed list of host:port addresses of sentinel nodes.
	SentinelAddrs []string
	// If specified with SentinelPassword, enables ACL-based authentication (via
	// AUTH <user> <pass>).
	SentinelUsername string
	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
	// authentication.
	SentinelPassword string
	// Allows routing read-only commands to the closest master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteByLatency bool
	// Allows routing read-only commands to the random master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteRandomly bool
	// Route all commands to slave read-only nodes.
	SlaveOnly bool
	// Use slaves disconnected with master when cannot get connected slaves
	// Now, this option only works in RandomSlaveAddr function.
	UseDisconnectedSlaves bool
	// Following options are copied from Options struct.
	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error
	// Username/Password authenticate against the data nodes (not Sentinel).
	Username string
	Password string
	DB int
	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration
	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration
	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool
	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
	TLSConfig *tls.Config
}
// clientOptions translates the failover configuration into single-node
// Options. Addr is a placeholder: the real address is resolved per dial
// by the sentinel-aware dialer installed in NewFailoverClient.
func (opt *FailoverOptions) clientOptions() *Options {
	return &Options{
		Addr: "FailoverClient",
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,
		DB:       opt.DB,
		Username: opt.Username,
		Password: opt.Password,
		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,
		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,
		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		TLSConfig: opt.TLSConfig,
	}
}
// sentinelOptions builds Options for connecting to the single Sentinel
// node at addr. Sentinels authenticate with their own credentials
// (SentinelUsername/SentinelPassword) and always use DB 0.
func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
	return &Options{
		Addr: addr,
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,
		DB:       0,
		Username: opt.SentinelUsername,
		Password: opt.SentinelPassword,
		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,
		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,
		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		TLSConfig: opt.TLSConfig,
	}
}
// clusterOptions translates the failover configuration into
// ClusterOptions for NewFailoverClusterClient. Note that MaxRetries is
// reused as the cluster's MaxRedirects, and DB is not carried over
// (cluster clients only use DB 0).
func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
	return &ClusterOptions{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,
		Username: opt.Username,
		Password: opt.Password,
		MaxRedirects: opt.MaxRetries,
		RouteByLatency: opt.RouteByLatency,
		RouteRandomly:  opt.RouteRandomly,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,
		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,
		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		TLSConfig: opt.TLSConfig,
	}
}
// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
	if failoverOpt.RouteByLatency {
		panic("to route commands by latency, use NewFailoverClusterClient")
	}
	if failoverOpt.RouteRandomly {
		panic("to route commands randomly, use NewFailoverClusterClient")
	}
	// Work on a shuffled copy of the seed list so the caller's slice is
	// never mutated and clients probe sentinels in different orders.
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
	})
	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}
	opt := failoverOpt.clientOptions()
	// Every new connection resolves the current master through Sentinel.
	opt.Dialer = masterSlaveDialer(failover)
	opt.init()
	connPool := newConnPool(opt)
	failover.mu.Lock()
	// NOTE(review): on failover this filters the pool by remote address,
	// intended to evict connections not pointing at the new master —
	// confirm the keep/remove semantics of pool.Filter.
	failover.onFailover = func(ctx context.Context, addr string) {
		_ = connPool.Filter(func(cn *pool.Conn) bool {
			return cn.RemoteAddr().String() != addr
		})
	}
	failover.mu.Unlock()
	c := Client{
		baseClient: newBaseClient(opt, connPool),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process
	c.onClose = failover.Close
	return &c
}
// masterSlaveDialer returns a dial function that ignores the address
// supplied by the pool (note the discarded third parameter) and instead
// asks Sentinel for the current master — or a random slave when
// SlaveOnly is set — on every connection attempt.
func masterSlaveDialer(
	failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
	return func(ctx context.Context, network, _ string) (net.Conn, error) {
		var addr string
		var err error
		if failover.opt.SlaveOnly {
			addr, err = failover.RandomSlaveAddr(ctx)
		} else {
			addr, err = failover.MasterAddr(ctx)
			if err == nil {
				// Record the (possibly new) master address and notify
				// the failover callback if it changed.
				failover.trySwitchMaster(ctx, addr)
			}
		}
		if err != nil {
			return nil, err
		}
		// A user-supplied dialer takes precedence over the default one.
		if failover.opt.Dialer != nil {
			return failover.opt.Dialer(ctx, network, addr)
		}
		netDialer := &net.Dialer{
			Timeout:   failover.opt.DialTimeout,
			KeepAlive: 5 * time.Minute,
		}
		if failover.opt.TLSConfig == nil {
			return netDialer.DialContext(ctx, network, addr)
		}
		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
	}
}
//------------------------------------------------------------------------------ | |||||
// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
	*baseClient
	hooks
	// ctx is the default context, returned by Context.
	ctx context.Context
}
func NewSentinelClient(opt *Options) *SentinelClient { | |||||
opt.init() | |||||
c := &SentinelClient{ | |||||
baseClient: &baseClient{ | |||||
opt: opt, | |||||
connPool: newConnPool(opt), | |||||
}, | |||||
ctx: context.Background(), | |||||
} | |||||
return c | |||||
} | |||||
// Context returns the client's default context.
func (c *SentinelClient) Context() context.Context {
	return c.ctx
}
func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient { | |||||
if ctx == nil { | |||||
panic("nil context") | |||||
} | |||||
clone := *c | |||||
clone.ctx = ctx | |||||
return &clone | |||||
} | |||||
// Process runs cmd through the registered hooks and then the underlying
// client's processing pipeline.
func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
// pubSub returns a PubSub whose connections are created from and
// returned to this client's connection pool.
func (c *SentinelClient) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,
		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
			return c.newConn(ctx)
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}
// Ping is used to test if a connection is still alive, or to
// measure latency. The error, if any, is available on the returned
// command.
func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
	cmd := NewStringCmd(ctx, "ping")
	_ = c.Process(ctx, cmd)
	return cmd
}
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create an empty subscription, in which
// case subscribing is deferred until the PubSub is used.
func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.Subscribe(ctx, channels...)
	}
	return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create an empty subscription, in which
// case subscribing is deferred until the PubSub is used.
func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.PSubscribe(ctx, channels...)
	}
	return pubsub
}
// GetMasterAddrByName returns the address of the current master for the
// named master set, as a [host, port] pair
// (SENTINEL GET-MASTER-ADDR-BY-NAME).
func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Sentinels returns the other Sentinel instances known for the named
// master set (SENTINEL SENTINELS).
func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Failover forces a failover as if the master was not reachable, and
// without asking for agreement to other Sentinels
// (SENTINEL FAILOVER <name>).
func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Reset resets all the masters with matching name. The pattern argument
// is a glob-style pattern. The reset process clears any previous state
// in a master (including a failover in progress), and removes every
// slave and sentinel already discovered and associated with the master.
// The returned count is the number of masters that matched.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
	_ = c.Process(ctx, cmd)
	return cmd
}
// FlushConfig forces Sentinel to rewrite its configuration on disk,
// including the current Sentinel state (SENTINEL FLUSHCONFIG).
func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
	_ = c.Process(ctx, cmd)
	return cmd
}
// Master shows the state and info of the specified master, as a map of
// field name to value (SENTINEL MASTER <name>).
func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Masters shows a list of monitored masters and their state
// (SENTINEL MASTERS).
func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "masters")
	_ = c.Process(ctx, cmd)
	return cmd
}
// Slaves shows a list of slaves for the specified master and their
// state (SENTINEL SLAVES <name>).
func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// CkQuorum checks if the current Sentinel configuration is able to
// reach the quorum needed to failover a master, and the majority needed
// to authorize the failover. This command should be used in monitoring
// systems to check if a Sentinel deployment is ok.
func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Monitor tells the Sentinel to start monitoring a new master with the
// specified name, ip, port, and quorum. All arguments are sent as
// strings, matching the SENTINEL MONITOR wire format.
func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Set is used in order to change configuration parameters of a specific
// master (SENTINEL SET <name> <option> <value>).
func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
	_ = c.Process(ctx, cmd)
	return cmd
}
// Remove is used in order to remove the specified master: the master
// will no longer be monitored, and will totally be removed from the
// internal state of the Sentinel (SENTINEL REMOVE <name>).
func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "remove", name)
	_ = c.Process(ctx, cmd)
	return cmd
}
//------------------------------------------------------------------------------ | |||||
// sentinelFailover tracks the current master address for a master set
// by querying, and subscribing to, a list of sentinel nodes.
type sentinelFailover struct {
	opt *FailoverOptions
	// sentinelAddrs is reordered so a working sentinel sits at index 0,
	// and grows as new sentinels are discovered.
	sentinelAddrs []string
	// onFailover is invoked when the master address changes;
	// onUpdate is invoked on every sentinel pubsub message.
	onFailover func(ctx context.Context, addr string)
	onUpdate   func(ctx context.Context)
	// mu guards the fields below.
	mu          sync.RWMutex
	_masterAddr string
	sentinel    *SentinelClient
	pubsub      *PubSub
}
func (c *sentinelFailover) Close() error { | |||||
c.mu.Lock() | |||||
defer c.mu.Unlock() | |||||
if c.sentinel != nil { | |||||
return c.closeSentinel() | |||||
} | |||||
return nil | |||||
} | |||||
func (c *sentinelFailover) closeSentinel() error { | |||||
firstErr := c.pubsub.Close() | |||||
c.pubsub = nil | |||||
err := c.sentinel.Close() | |||||
if err != nil && firstErr == nil { | |||||
firstErr = err | |||||
} | |||||
c.sentinel = nil | |||||
return firstErr | |||||
} | |||||
// RandomSlaveAddr returns a randomly chosen slave address for the
// configured master set. When no connected slaves are available it
// optionally retries including disconnected slaves
// (UseDisconnectedSlaves), and finally falls back to the master itself.
func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
	if c.opt == nil {
		return "", errors.New("opt is nil")
	}
	addresses, err := c.slaveAddrs(ctx, false)
	if err != nil {
		return "", err
	}
	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
		addresses, err = c.slaveAddrs(ctx, true)
		if err != nil {
			return "", err
		}
	}
	// No usable slaves at all: route to the master instead of failing.
	if len(addresses) == 0 {
		return c.MasterAddr(ctx)
	}
	return addresses[rand.Intn(len(addresses))], nil
}
// MasterAddr resolves the current master "host:port" for opt.MasterName.
// It first tries the cached sentinel under the read lock, then
// re-checks under the write lock (double-checked locking), and finally
// probes the configured sentinel addresses in order until one answers.
func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()
	if sentinel != nil {
		addr := c.getMasterAddr(ctx, sentinel)
		if addr != "" {
			return addr, nil
		}
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Another goroutine may have (re)established a sentinel while we
	// waited for the write lock, so check the cache again.
	if c.sentinel != nil {
		addr := c.getMasterAddr(ctx, c.sentinel)
		if addr != "" {
			return addr, nil
		}
		// The cached sentinel could not answer: drop it and re-probe.
		_ = c.closeSentinel()
	}
	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}
		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)
		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		return addr, nil
	}
	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
// slaveAddrs returns the "host:port" addresses of the master's slaves,
// using the same cached-then-probe strategy as MasterAddr. When
// useDisconnected is set, slaves flagged "disconnected" are included.
// A reachable sentinel with no usable slaves yields an empty slice and
// a nil error; an empty slice with a non-nil error means no sentinel
// answered at all.
func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()
	if sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Re-check the cached sentinel under the write lock before probing.
	if c.sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, c.sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
		_ = c.closeSentinel()
	}
	var sentinelReachable bool
	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}
		sentinelReachable = true
		addrs := parseSlaveAddrs(slaves, useDisconnected)
		if len(addrs) == 0 {
			continue
		}
		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)
		return addrs, nil
	}
	if sentinelReachable {
		return []string{}, nil
	}
	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
// getMasterAddr asks the given sentinel for the master's address,
// returning "" (after logging) on any error.
func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
			c.opt.MasterName, err)
		return ""
	}
	return net.JoinHostPort(addr[0], addr[1])
}
// getSlaveAddrs asks the given sentinel for the master's slaves,
// returning an empty slice (after logging) on any error. Disconnected
// slaves are always excluded here (keepDisconnected=false).
func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
			c.opt.MasterName, err)
		return []string{}
	}
	return parseSlaveAddrs(addrs, false)
}
// parseSlaveAddrs converts a raw SENTINEL SLAVES reply (a list of flat
// key/value field lists) into "host:port" strings. Nodes flagged
// s_down or o_down are always dropped; nodes flagged disconnected are
// dropped unless keepDisconnected is set.
func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
	out := make([]string, 0, len(addrs))
	for _, raw := range addrs {
		var (
			ip    string
			port  string
			flags []string
			prev  string
		)
		// Each field list alternates key, value: remember the previous
		// token and interpret the current one accordingly.
		for _, field := range raw.([]interface{}) {
			s := field.(string)
			switch prev {
			case "ip":
				ip = s
			case "port":
				port = s
			case "flags":
				flags = strings.Split(s, ",")
			}
			prev = s
		}
		down := false
		for _, flag := range flags {
			if flag == "s_down" || flag == "o_down" {
				down = true
			} else if flag == "disconnected" && !keepDisconnected {
				down = true
			}
		}
		if !down {
			out = append(out, net.JoinHostPort(ip, port))
		}
	}
	return out
}
// trySwitchMaster records addr as the current master and fires the
// onFailover callback, but only when the address actually changed.
// The read-lock check followed by a write-lock re-check keeps the
// common no-change path cheap.
func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
	c.mu.RLock()
	currentAddr := c._masterAddr //nolint:ifshort
	c.mu.RUnlock()
	if addr == currentAddr {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Another goroutine may have already switched to this address while
	// we waited for the write lock.
	if addr == c._masterAddr {
		return
	}
	c._masterAddr = addr
	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
		c.opt.MasterName, addr)
	if c.onFailover != nil {
		c.onFailover(ctx, addr)
	}
}
// setSentinel caches sentinel as the active client, discovers sibling
// sentinels, and starts a goroutine listening for failover events.
// Callers must hold c.mu and must have closed any previous sentinel —
// the panic enforces that invariant.
func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
	if c.sentinel != nil {
		panic("not reached")
	}
	c.sentinel = sentinel
	c.discoverSentinels(ctx)
	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
	go c.listen(c.pubsub)
}
// discoverSentinels asks the active sentinel for its known peers and
// appends any previously unseen addresses to c.sentinelAddrs.
// Errors are logged and otherwise ignored (discovery is best-effort).
func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		// Each sentinel entry is a flat key/value list; scan it for the
		// ip and port fields.
		var ip, port string
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			switch key {
			case "ip":
				ip = vals[i+1].(string)
			case "port":
				port = vals[i+1].(string)
			}
		}
		if ip != "" && port != "" {
			sentinelAddr := net.JoinHostPort(ip, port)
			if !contains(c.sentinelAddrs, sentinelAddr) {
				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
					sentinelAddr, c.opt.MasterName)
				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
			}
		}
	}
}
// listen consumes sentinel pubsub notifications, switching the tracked
// master on "+switch-master" events and invoking onUpdate once up front
// and after every message. It runs until the pubsub channel is closed.
func (c *sentinelFailover) listen(pubsub *PubSub) {
	ctx := context.TODO()
	if c.onUpdate != nil {
		c.onUpdate(ctx)
	}
	ch := pubsub.Channel()
	for msg := range ch {
		if msg.Channel == "+switch-master" {
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != c.opt.MasterName {
				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
				continue
			}
			// Per the Sentinel docs, the payload is
			// "<name> <oldip> <oldport> <newip> <newport>", so fields
			// 3 and 4 carry the new master's host and port.
			addr := net.JoinHostPort(parts[3], parts[4])
			c.trySwitchMaster(pubsub.getContext(), addr)
		}
		if c.onUpdate != nil {
			c.onUpdate(ctx)
		}
	}
}
// contains reports whether str appears in slice.
func contains(slice []string, str string) bool {
	for i := range slice {
		if slice[i] == str {
			return true
		}
	}
	return false
}
//------------------------------------------------------------------------------ | |||||
// NewFailoverClusterClient returns a client that supports routing read-only commands
// to a slave node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
	// Copy the seed list so the caller's slice is never mutated.
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}
	opt := failoverOpt.clusterOptions()
	// Present the sentinel topology as a single-shard "cluster": the
	// master and its slaves all serve the full 0-16383 slot range, so
	// the cluster client's routing options (RouteByLatency etc.) apply.
	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
		masterAddr, err := failover.MasterAddr(ctx)
		if err != nil {
			return nil, err
		}
		nodes := []ClusterNode{{
			Addr: masterAddr,
		}}
		slaveAddrs, err := failover.slaveAddrs(ctx, false)
		if err != nil {
			return nil, err
		}
		for _, slaveAddr := range slaveAddrs {
			nodes = append(nodes, ClusterNode{
				Addr: slaveAddr,
			})
		}
		slots := []ClusterSlot{
			{
				Start: 0,
				End:   16383,
				Nodes: nodes,
			},
		}
		return slots, nil
	}
	c := NewClusterClient(opt)
	failover.mu.Lock()
	// Reload the cluster state whenever sentinel reports a change.
	failover.onUpdate = func(ctx context.Context) {
		c.ReloadState(ctx)
	}
	failover.mu.Unlock()
	return c
}
@@ -0,0 +1,149 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"github.com/go-redis/redis/v8/internal/pool" | |||||
"github.com/go-redis/redis/v8/internal/proto" | |||||
) | |||||
// TxFailedErr is returned when a Redis transaction has failed.
const TxFailedErr = proto.RedisError("redis: transaction failed")
// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets list of watched keys.
//
// If you don't need WATCH, use Pipeline instead.
type Tx struct {
	baseClient
	cmdable
	statefulCmdable
	hooks
	// ctx is the default context, returned by Context.
	ctx context.Context
}
// newTx returns a transaction backed by a sticky connection pool, so
// the same underlying connection is reused across the transaction's
// commands (WATCH state is per-connection). Hooks are cloned from the
// parent client.
func (c *Client) newTx(ctx context.Context) *Tx {
	tx := Tx{
		baseClient: baseClient{
			opt:      c.opt,
			connPool: pool.NewStickyConnPool(c.connPool),
		},
		hooks: c.hooks.clone(),
		ctx:   ctx,
	}
	tx.init()
	return &tx
}
// init wires the generated command methods (cmdable/statefulCmdable)
// to this transaction's Process method.
func (c *Tx) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}
// Context returns the transaction's default context.
func (c *Tx) Context() context.Context {
	return c.ctx
}
func (c *Tx) WithContext(ctx context.Context) *Tx { | |||||
if ctx == nil { | |||||
panic("nil context") | |||||
} | |||||
clone := *c | |||||
clone.init() | |||||
clone.hooks.lock() | |||||
clone.ctx = ctx | |||||
return &clone | |||||
} | |||||
// Process runs cmd through the registered hooks and the transaction's
// sticky connection.
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
// Watch prepares a transaction and marks the keys to be watched
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
	tx := c.newTx(ctx)
	defer tx.Close(ctx)
	// Issue WATCH before handing the transaction to fn, so the keys are
	// guarded for the whole callback.
	if len(keys) > 0 {
		if err := tx.Watch(ctx, keys...).Err(); err != nil {
			return err
		}
	}
	return fn(tx)
}
// Close closes the transaction, releasing any open resources.
func (c *Tx) Close(ctx context.Context) error {
	// Best effort: clear any watched keys before releasing the
	// underlying connection; the UNWATCH error is deliberately ignored.
	_ = c.Unwatch(ctx).Err()
	return c.baseClient.Close()
}
// Watch marks the keys to be watched for conditional execution | |||||
// of a transaction. | |||||
func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd { | |||||
args := make([]interface{}, 1+len(keys)) | |||||
args[0] = "watch" | |||||
for i, key := range keys { | |||||
args[1+i] = key | |||||
} | |||||
cmd := NewStatusCmd(ctx, args...) | |||||
_ = c.Process(ctx, cmd) | |||||
return cmd | |||||
} | |||||
// Unwatch flushes all the previously watched keys for a transaction. | |||||
func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd { | |||||
args := make([]interface{}, 1+len(keys)) | |||||
args[0] = "unwatch" | |||||
for i, key := range keys { | |||||
args[1+i] = key | |||||
} | |||||
cmd := NewStatusCmd(ctx, args...) | |||||
_ = c.Process(ctx, cmd) | |||||
return cmd | |||||
} | |||||
// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx: c.ctx,
		exec: func(ctx context.Context, cmds []Cmder) error {
			// Plain (non-transactional) pipeline execution path.
			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
		},
	}
	pipe.init()
	return &pipe
}
// Pipelined executes commands queued in the fn outside of the transaction.
// Use TxPipelined if you need transactional behavior.
func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}
// TxPipelined executes commands queued in the fn in the transaction.
//
// When using WATCH, EXEC will execute commands only if the watched keys
// were not modified, allowing for a check-and-set mechanism.
//
// Exec always returns list of commands. If transaction fails
// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx: c.ctx,
		exec: func(ctx context.Context, cmds []Cmder) error {
			// Transactional execution path (MULTI/EXEC semantics).
			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
		},
	}
	pipe.init()
	return &pipe
}
@@ -0,0 +1,213 @@ | |||||
package redis | |||||
import ( | |||||
"context" | |||||
"crypto/tls" | |||||
"net" | |||||
"time" | |||||
) | |||||
// UniversalOptions information is required by UniversalClient to establish
// connections.
type UniversalOptions struct {
	// Either a single address or a seed list of host:port addresses
	// of cluster/sentinel nodes.
	Addrs []string
	// Database to be selected after connecting to the server.
	// Only single-node and failover clients.
	DB int
	// Common options. These carry the same semantics as the matching
	// fields of Options; see that struct for details.
	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error
	Username         string
	Password         string
	SentinelPassword string
	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration
	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration
	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool
	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
	TLSConfig *tls.Config
	// Only cluster clients.
	MaxRedirects   int
	ReadOnly       bool
	RouteByLatency bool
	RouteRandomly  bool
	// The sentinel master name.
	// Only failover clients.
	MasterName string
}
// Cluster returns cluster options created from the universal options.
// If Addrs is empty it is defaulted (in place, mutating o) to the local
// Redis port. DB and MasterName are not carried over to cluster mode.
func (o *UniversalOptions) Cluster() *ClusterOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:6379"}
	}
	return &ClusterOptions{
		Addrs:     o.Addrs,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,
		Username: o.Username,
		Password: o.Password,
		MaxRedirects:   o.MaxRedirects,
		ReadOnly:       o.ReadOnly,
		RouteByLatency: o.RouteByLatency,
		RouteRandomly:  o.RouteRandomly,
		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,
		DialTimeout:        o.DialTimeout,
		ReadTimeout:        o.ReadTimeout,
		WriteTimeout:       o.WriteTimeout,
		PoolFIFO:           o.PoolFIFO,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,
		TLSConfig: o.TLSConfig,
	}
}
// Failover returns failover options created from the universal options.
// If Addrs is empty it is defaulted (in place, mutating o) to the local
// Sentinel port; Addrs is interpreted as the sentinel seed list.
func (o *UniversalOptions) Failover() *FailoverOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:26379"}
	}
	return &FailoverOptions{
		SentinelAddrs: o.Addrs,
		MasterName:    o.MasterName,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,
		DB:               o.DB,
		Username:         o.Username,
		Password:         o.Password,
		SentinelPassword: o.SentinelPassword,
		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,
		DialTimeout:  o.DialTimeout,
		ReadTimeout:  o.ReadTimeout,
		WriteTimeout: o.WriteTimeout,
		PoolFIFO:           o.PoolFIFO,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,
		TLSConfig: o.TLSConfig,
	}
}
// Simple returns basic options created from the universal options. | |||||
func (o *UniversalOptions) Simple() *Options { | |||||
addr := "127.0.0.1:6379" | |||||
if len(o.Addrs) > 0 { | |||||
addr = o.Addrs[0] | |||||
} | |||||
return &Options{ | |||||
Addr: addr, | |||||
Dialer: o.Dialer, | |||||
OnConnect: o.OnConnect, | |||||
DB: o.DB, | |||||
Username: o.Username, | |||||
Password: o.Password, | |||||
MaxRetries: o.MaxRetries, | |||||
MinRetryBackoff: o.MinRetryBackoff, | |||||
MaxRetryBackoff: o.MaxRetryBackoff, | |||||
DialTimeout: o.DialTimeout, | |||||
ReadTimeout: o.ReadTimeout, | |||||
WriteTimeout: o.WriteTimeout, | |||||
PoolFIFO: o.PoolFIFO, | |||||
PoolSize: o.PoolSize, | |||||
MinIdleConns: o.MinIdleConns, | |||||
MaxConnAge: o.MaxConnAge, | |||||
PoolTimeout: o.PoolTimeout, | |||||
IdleTimeout: o.IdleTimeout, | |||||
IdleCheckFrequency: o.IdleCheckFrequency, | |||||
TLSConfig: o.TLSConfig, | |||||
} | |||||
} | |||||
// -------------------------------------------------------------------- | |||||
// UniversalClient is an abstract client which - based on the provided options - | |||||
// represents either a ClusterClient, a FailoverClient, or a single-node Client. | |||||
// This can be useful for testing cluster-specific applications locally or having different | |||||
// clients in different environments. | |||||
type UniversalClient interface {
	Cmdable
	Context() context.Context
	AddHook(Hook)
	// Watch runs fn inside a transactional WATCH on the given keys.
	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
	// Do and Process execute arbitrary commands against the server(s).
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	// Subscribe / PSubscribe enter pub/sub mode for channels / patterns.
	Subscribe(ctx context.Context, channels ...string) *PubSub
	PSubscribe(ctx context.Context, channels ...string) *PubSub
	Close() error
	PoolStats() *PoolStats
}
// Compile-time assertions that each concrete client type implements
// the UniversalClient interface.
var (
	_ UniversalClient = (*Client)(nil)
	_ UniversalClient = (*ClusterClient)(nil)
	_ UniversalClient = (*Ring)(nil)
)
// NewUniversalClient returns a new multi client. The type of the returned client depends | |||||
// on the following conditions: | |||||
// | |||||
// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. | |||||
// 2. if the number of Addrs is two or more, a ClusterClient is returned. | |||||
// 3. Otherwise, a single-node Client is returned. | |||||
func NewUniversalClient(opts *UniversalOptions) UniversalClient { | |||||
if opts.MasterName != "" { | |||||
return NewFailoverClient(opts.Failover()) | |||||
} else if len(opts.Addrs) > 1 { | |||||
return NewClusterClient(opts.Cluster()) | |||||
} | |||||
return NewClient(opts.Simple()) | |||||
} |
@@ -0,0 +1,6 @@ | |||||
package redis | |||||
// Version is the current release version.
func Version() string {
	// Kept as a named constant so the release number is easy to locate.
	const version = "8.11.4"
	return version
}
@@ -0,0 +1,21 @@ | |||||
MIT License | |||||
Copyright (c) 2017 NSQ Authors | |||||
Permission is hereby granted, free of charge, to any person obtaining a copy | |||||
of this software and associated documentation files (the "Software"), to deal | |||||
in the Software without restriction, including without limitation the rights | |||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||||
copies of the Software, and to permit persons to whom the Software is | |||||
furnished to do so, subject to the following conditions: | |||||
The above copyright notice and this permission notice shall be included in all | |||||
copies or substantial portions of the Software. | |||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||||
SOFTWARE. |
@@ -0,0 +1,7 @@ | |||||
# go-diskqueue | |||||
[![Build Status](https://github.com/nsqio/go-diskqueue/workflows/tests/badge.svg)](https://github.com/nsqio/go-diskqueue/actions) [![Go Reference](https://pkg.go.dev/badge/github.com/nsqio/go-diskqueue.svg)](https://pkg.go.dev/github.com/nsqio/go-diskqueue) [![GitHub release](https://img.shields.io/github/release/nsqio/go-diskqueue.svg)](https://github.com/nsqio/go-diskqueue/releases/latest) | |||||
A Go package providing a filesystem-backed FIFO queue | |||||
Pulled out of https://github.com/nsqio/nsq |
@@ -0,0 +1,712 @@ | |||||
package diskqueue | |||||
import ( | |||||
"bufio" | |||||
"bytes" | |||||
"encoding/binary" | |||||
"errors" | |||||
"fmt" | |||||
"io" | |||||
"math/rand" | |||||
"os" | |||||
"path" | |||||
"sync" | |||||
"time" | |||||
) | |||||
// logging stuff copied from github.com/nsqio/nsq/internal/lg

// LogLevel represents the severity of a log message.
type LogLevel int

// Log levels in increasing order of severity.
const (
	DEBUG = LogLevel(1)
	INFO  = LogLevel(2)
	WARN  = LogLevel(3)
	ERROR = LogLevel(4)
	FATAL = LogLevel(5)
)

// AppLogFunc is the logging callback supplied by the embedding application.
type AppLogFunc func(lvl LogLevel, f string, args ...interface{})

// String returns the human-readable name of the level.
// Note: WARN renders as "WARNING" to match nsq's log output.
//
// Switching on the named constants (rather than the magic literals 1..5
// previously used here) keeps this method in sync with the const block above.
func (l LogLevel) String() string {
	switch l {
	case DEBUG:
		return "DEBUG"
	case INFO:
		return "INFO"
	case WARN:
		return "WARNING"
	case ERROR:
		return "ERROR"
	case FATAL:
		return "FATAL"
	}
	panic("invalid LogLevel")
}
// Interface is the abstract contract satisfied by diskQueue.
type Interface interface {
	// Put appends a message to the queue.
	Put([]byte) error
	ReadChan() <-chan []byte // this is expected to be an *unbuffered* channel
	// Close persists state and shuts down; Delete additionally removes all files.
	Close() error
	Delete() error
	// Depth reports the number of messages written but not yet consumed.
	Depth() int64
	// Empty destructively discards all pending messages.
	Empty() error
}
// diskQueue implements a filesystem backed FIFO queue.
//
// All mutable run-time state is owned by the ioLoop goroutine; public
// methods communicate with it over the internal channels below.
type diskQueue struct {
	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
	// run-time state (also persisted to disk)
	readPos      int64
	writePos     int64
	readFileNum  int64
	writeFileNum int64
	depth        int64
	// guards exitFlag: public methods take the read side, exit() the write side
	sync.RWMutex
	// instantiation time metadata
	name                string
	dataPath            string
	maxBytesPerFile     int64 // cannot change once created
	maxBytesPerFileRead int64 // actual rollover size of the file being read
	minMsgSize          int32
	maxMsgSize          int32
	syncEvery           int64         // number of writes per fsync
	syncTimeout         time.Duration // duration of time per fsync
	exitFlag            int32         // set to 1 (under the write lock) when shutting down
	needSync            bool          // set when state changed enough to warrant a sync
	// keeps track of the position where we have read
	// (but not yet sent over readChan)
	nextReadPos     int64
	nextReadFileNum int64
	readFile  *os.File
	writeFile *os.File
	reader    *bufio.Reader
	writeBuf  bytes.Buffer // scratch buffer so each message is a single file write
	// exposed via ReadChan()
	readChan chan []byte
	// internal channels
	depthChan         chan int64
	writeChan         chan []byte
	writeResponseChan chan error
	emptyChan         chan int
	emptyResponseChan chan error
	exitChan     chan int
	exitSyncChan chan int
	logf AppLogFunc
}
// New instantiates an instance of diskQueue, retrieving metadata | |||||
// from the filesystem and starting the read ahead goroutine | |||||
func New(name string, dataPath string, maxBytesPerFile int64, | |||||
minMsgSize int32, maxMsgSize int32, | |||||
syncEvery int64, syncTimeout time.Duration, logf AppLogFunc) Interface { | |||||
d := diskQueue{ | |||||
name: name, | |||||
dataPath: dataPath, | |||||
maxBytesPerFile: maxBytesPerFile, | |||||
minMsgSize: minMsgSize, | |||||
maxMsgSize: maxMsgSize, | |||||
readChan: make(chan []byte), | |||||
depthChan: make(chan int64), | |||||
writeChan: make(chan []byte), | |||||
writeResponseChan: make(chan error), | |||||
emptyChan: make(chan int), | |||||
emptyResponseChan: make(chan error), | |||||
exitChan: make(chan int), | |||||
exitSyncChan: make(chan int), | |||||
syncEvery: syncEvery, | |||||
syncTimeout: syncTimeout, | |||||
logf: logf, | |||||
} | |||||
// no need to lock here, nothing else could possibly be touching this instance | |||||
err := d.retrieveMetaData() | |||||
if err != nil && !os.IsNotExist(err) { | |||||
d.logf(ERROR, "DISKQUEUE(%s) failed to retrieveMetaData - %s", d.name, err) | |||||
} | |||||
go d.ioLoop() | |||||
return &d | |||||
} | |||||
// Depth returns the depth of the queue (messages written but not yet read).
//
// The value is normally received from ioLoop over depthChan, which
// synchronizes access. Once ioLoop has exited, depthChan is closed and
// the receive yields (0, false); reading d.depth directly is then safe
// because no goroutine mutates it anymore.
func (d *diskQueue) Depth() int64 {
	depth, ok := <-d.depthChan
	if !ok {
		// ioLoop exited
		depth = d.depth
	}
	return depth
}
// ReadChan returns the receive-only []byte channel for reading data.
//
// The channel is unbuffered: a message is only considered consumed (and
// read positions advanced) once a receiver has actually taken it.
func (d *diskQueue) ReadChan() <-chan []byte {
	return d.readChan
}
// Put writes a []byte to the queue, blocking until ioLoop has persisted
// it (or failed to) and returning that result.
//
// The RLock pairs with the write lock taken in exit(): it guarantees
// exitFlag cannot flip to 1 (and exitChan cannot close) while a Put is
// in flight.
func (d *diskQueue) Put(data []byte) error {
	d.RLock()
	defer d.RUnlock()
	if d.exitFlag == 1 {
		return errors.New("exiting")
	}
	// hand the message to ioLoop and wait for the write result
	d.writeChan <- data
	return <-d.writeResponseChan
}
// Close cleans up the queue and persists metadata | |||||
func (d *diskQueue) Close() error { | |||||
err := d.exit(false) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
return d.sync() | |||||
} | |||||
// Delete shuts the queue down; unlike Close it does not persist metadata
// (the deleted flag is handled by exit).
func (d *diskQueue) Delete() error {
	return d.exit(true)
}
// exit performs an orderly shutdown of the queue.
//
// Taking the write lock excludes all public methods (which hold the read
// lock), so no new work can race with the shutdown sequence below.
func (d *diskQueue) exit(deleted bool) error {
	d.Lock()
	defer d.Unlock()
	d.exitFlag = 1
	if deleted {
		d.logf(INFO, "DISKQUEUE(%s): deleting", d.name)
	} else {
		d.logf(INFO, "DISKQUEUE(%s): closing", d.name)
	}
	close(d.exitChan)
	// ensure that ioLoop has exited
	<-d.exitSyncChan
	// ioLoop is gone, so nothing sends on depthChan anymore; closing it
	// unblocks any concurrent Depth() callers
	close(d.depthChan)
	if d.readFile != nil {
		d.readFile.Close()
		d.readFile = nil
	}
	if d.writeFile != nil {
		d.writeFile.Close()
		d.writeFile = nil
	}
	return nil
}
// Empty destructively clears out any pending data in the queue
// by fast forwarding read positions and removing intermediate files.
//
// Like Put, it holds the read lock to exclude a concurrent exit() and
// delegates the actual work to ioLoop via emptyChan.
func (d *diskQueue) Empty() error {
	d.RLock()
	defer d.RUnlock()
	if d.exitFlag == 1 {
		return errors.New("exiting")
	}
	d.logf(INFO, "DISKQUEUE(%s): emptying", d.name)
	// ioLoop performs the deletion; wait for its result
	d.emptyChan <- 1
	return <-d.emptyResponseChan
}
func (d *diskQueue) deleteAllFiles() error { | |||||
err := d.skipToNextRWFile() | |||||
innerErr := os.Remove(d.metaDataFileName()) | |||||
if innerErr != nil && !os.IsNotExist(innerErr) { | |||||
d.logf(ERROR, "DISKQUEUE(%s) failed to remove metadata file - %s", d.name, innerErr) | |||||
return innerErr | |||||
} | |||||
return err | |||||
} | |||||
// skipToNextRWFile closes any open files, removes every data file from the
// current read file through the current write file, and resets all read and
// write positions to the start of a brand-new file number.
//
// Removal errors (other than "file does not exist") are logged; the last
// one encountered is returned, but positions are reset regardless.
func (d *diskQueue) skipToNextRWFile() error {
	var err error
	if d.readFile != nil {
		d.readFile.Close()
		d.readFile = nil
	}
	if d.writeFile != nil {
		d.writeFile.Close()
		d.writeFile = nil
	}
	for i := d.readFileNum; i <= d.writeFileNum; i++ {
		fn := d.fileName(i)
		innerErr := os.Remove(fn)
		if innerErr != nil && !os.IsNotExist(innerErr) {
			d.logf(ERROR, "DISKQUEUE(%s) failed to remove data file - %s", d.name, innerErr)
			err = innerErr
		}
	}
	// start fresh: read == write at position 0 on a new (not yet created) file
	d.writeFileNum++
	d.writePos = 0
	d.readFileNum = d.writeFileNum
	d.readPos = 0
	d.nextReadFileNum = d.writeFileNum
	d.nextReadPos = 0
	d.depth = 0
	return err
}
// readOne performs a low level filesystem read for a single []byte
// while advancing read positions and rolling files, if necessary.
//
// On-disk format: each message is a 4-byte big-endian length prefix
// followed by the message body. Any read/decode failure closes the
// current read file so the caller (ioLoop) can invoke handleReadError.
func (d *diskQueue) readOne() ([]byte, error) {
	var err error
	var msgSize int32
	if d.readFile == nil {
		// lazily (re)open the current read file and seek to readPos
		curFileName := d.fileName(d.readFileNum)
		d.readFile, err = os.OpenFile(curFileName, os.O_RDONLY, 0600)
		if err != nil {
			return nil, err
		}
		d.logf(INFO, "DISKQUEUE(%s): readOne() opened %s", d.name, curFileName)
		if d.readPos > 0 {
			_, err = d.readFile.Seek(d.readPos, 0)
			if err != nil {
				d.readFile.Close()
				d.readFile = nil
				return nil, err
			}
		}
		// for "complete" files (i.e. not the "current" file), maxBytesPerFileRead
		// should be initialized to the file's size, or default to maxBytesPerFile
		d.maxBytesPerFileRead = d.maxBytesPerFile
		if d.readFileNum < d.writeFileNum {
			// err is deliberately shadowed here: a failed Stat just keeps
			// the maxBytesPerFile default
			stat, err := d.readFile.Stat()
			if err == nil {
				d.maxBytesPerFileRead = stat.Size()
			}
		}
		d.reader = bufio.NewReader(d.readFile)
	}
	err = binary.Read(d.reader, binary.BigEndian, &msgSize)
	if err != nil {
		d.readFile.Close()
		d.readFile = nil
		return nil, err
	}
	if msgSize < d.minMsgSize || msgSize > d.maxMsgSize {
		// this file is corrupt and we have no reasonable guarantee on
		// where a new message should begin
		d.readFile.Close()
		d.readFile = nil
		return nil, fmt.Errorf("invalid message read size (%d)", msgSize)
	}
	readBuf := make([]byte, msgSize)
	_, err = io.ReadFull(d.reader, readBuf)
	if err != nil {
		d.readFile.Close()
		d.readFile = nil
		return nil, err
	}
	// 4-byte length prefix + body
	totalBytes := int64(4 + msgSize)
	// we only advance next* because we have not yet sent this to consumers
	// (where readFileNum, readPos will actually be advanced)
	d.nextReadPos = d.readPos + totalBytes
	d.nextReadFileNum = d.readFileNum
	// we only consider rotating if we're reading a "complete" file
	// and since we cannot know the size at which it was rotated, we
	// rely on maxBytesPerFileRead rather than maxBytesPerFile
	if d.readFileNum < d.writeFileNum && d.nextReadPos >= d.maxBytesPerFileRead {
		if d.readFile != nil {
			d.readFile.Close()
			d.readFile = nil
		}
		d.nextReadFileNum++
		d.nextReadPos = 0
	}
	return readBuf, nil
}
// writeOne performs a low level filesystem write for a single []byte
// while advancing write positions and rolling files, if necessary.
//
// Each message is written as a 4-byte big-endian length prefix followed
// by the body (mirroring readOne). A failed file write closes writeFile
// so the next attempt reopens and re-seeks.
func (d *diskQueue) writeOne(data []byte) error {
	var err error
	dataLen := int32(len(data))
	// 4-byte length prefix + body
	totalBytes := int64(4 + dataLen)
	if dataLen < d.minMsgSize || dataLen > d.maxMsgSize {
		return fmt.Errorf("invalid message write size (%d) minMsgSize=%d maxMsgSize=%d", dataLen, d.minMsgSize, d.maxMsgSize)
	}
	// will not wrap-around if maxBytesPerFile + maxMsgSize < Int64Max
	if d.writePos > 0 && d.writePos+totalBytes > d.maxBytesPerFile {
		if d.readFileNum == d.writeFileNum {
			// the reader is still on this file; record its true final
			// size so the reader knows where to roll over
			d.maxBytesPerFileRead = d.writePos
		}
		d.writeFileNum++
		d.writePos = 0
		// sync every time we start writing to a new file
		err = d.sync()
		if err != nil {
			d.logf(ERROR, "DISKQUEUE(%s) failed to sync - %s", d.name, err)
		}
		if d.writeFile != nil {
			d.writeFile.Close()
			d.writeFile = nil
		}
	}
	if d.writeFile == nil {
		// lazily (re)open the current write file and seek to writePos
		curFileName := d.fileName(d.writeFileNum)
		d.writeFile, err = os.OpenFile(curFileName, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			return err
		}
		d.logf(INFO, "DISKQUEUE(%s): writeOne() opened %s", d.name, curFileName)
		if d.writePos > 0 {
			_, err = d.writeFile.Seek(d.writePos, 0)
			if err != nil {
				d.writeFile.Close()
				d.writeFile = nil
				return err
			}
		}
	}
	// stage prefix + body in writeBuf so the message hits the file in a
	// single Write call
	d.writeBuf.Reset()
	err = binary.Write(&d.writeBuf, binary.BigEndian, dataLen)
	if err != nil {
		return err
	}
	_, err = d.writeBuf.Write(data)
	if err != nil {
		return err
	}
	// only write to the file once
	_, err = d.writeFile.Write(d.writeBuf.Bytes())
	if err != nil {
		d.writeFile.Close()
		d.writeFile = nil
		return err
	}
	d.writePos += totalBytes
	d.depth += 1
	return err
}
// sync fsyncs the current writeFile and persists metadata | |||||
func (d *diskQueue) sync() error { | |||||
if d.writeFile != nil { | |||||
err := d.writeFile.Sync() | |||||
if err != nil { | |||||
d.writeFile.Close() | |||||
d.writeFile = nil | |||||
return err | |||||
} | |||||
} | |||||
err := d.persistMetaData() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
d.needSync = false | |||||
return nil | |||||
} | |||||
// retrieveMetaData initializes state from the filesystem.
//
// The metadata file holds three lines: depth, then "readFileNum,readPos",
// then "writeFileNum,writePos" (the exact format persistMetaData writes).
// A missing metadata file is reported via the returned error (the caller
// treats os.IsNotExist as a fresh queue).
func (d *diskQueue) retrieveMetaData() error {
	var f *os.File
	var err error
	fileName := d.metaDataFileName()
	f, err = os.OpenFile(fileName, os.O_RDONLY, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	var depth int64
	_, err = fmt.Fscanf(f, "%d\n%d,%d\n%d,%d\n",
		&depth,
		&d.readFileNum, &d.readPos,
		&d.writeFileNum, &d.writePos)
	if err != nil {
		return err
	}
	d.depth = depth
	d.nextReadFileNum = d.readFileNum
	d.nextReadPos = d.readPos
	// if the metadata was not sync'd at the last shutdown of nsqd
	// then the actual file size might actually be larger than the writePos,
	// in which case the safest thing to do is skip to the next file for
	// writes, and let the reader salvage what it can from the messages in the
	// diskqueue beyond the metadata's likely also stale readPos
	fileName = d.fileName(d.writeFileNum)
	fileInfo, err := os.Stat(fileName)
	if err != nil {
		return err
	}
	fileSize := fileInfo.Size()
	if d.writePos < fileSize {
		d.logf(WARN,
			"DISKQUEUE(%s) %s metadata writePos %d < file size of %d, skipping to new file",
			d.name, fileName, d.writePos, fileSize)
		d.writeFileNum += 1
		d.writePos = 0
		if d.writeFile != nil {
			d.writeFile.Close()
			d.writeFile = nil
		}
	}
	return nil
}
// persistMetaData atomically writes state to the filesystem | |||||
func (d *diskQueue) persistMetaData() error { | |||||
var f *os.File | |||||
var err error | |||||
fileName := d.metaDataFileName() | |||||
tmpFileName := fmt.Sprintf("%s.%d.tmp", fileName, rand.Int()) | |||||
// write to tmp file | |||||
f, err = os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE, 0600) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
_, err = fmt.Fprintf(f, "%d\n%d,%d\n%d,%d\n", | |||||
d.depth, | |||||
d.readFileNum, d.readPos, | |||||
d.writeFileNum, d.writePos) | |||||
if err != nil { | |||||
f.Close() | |||||
return err | |||||
} | |||||
f.Sync() | |||||
f.Close() | |||||
// atomically rename | |||||
return os.Rename(tmpFileName, fileName) | |||||
} | |||||
func (d *diskQueue) metaDataFileName() string { | |||||
return fmt.Sprintf(path.Join(d.dataPath, "%s.diskqueue.meta.dat"), d.name) | |||||
} | |||||
func (d *diskQueue) fileName(fileNum int64) string { | |||||
return fmt.Sprintf(path.Join(d.dataPath, "%s.diskqueue.%06d.dat"), d.name, fileNum) | |||||
} | |||||
// checkTailCorruption validates internal invariants once the reader has
// caught up with the writer (the "tail" of the queue): depth must be 0
// and read positions must equal write positions. Any violation is logged
// as corruption/data loss and state is forcibly reset.
func (d *diskQueue) checkTailCorruption(depth int64) {
	// only meaningful once we've consumed up to the write position
	if d.readFileNum < d.writeFileNum || d.readPos < d.writePos {
		return
	}
	// we've reached the end of the diskqueue
	// if depth isn't 0 something went wrong
	if depth != 0 {
		if depth < 0 {
			d.logf(ERROR,
				"DISKQUEUE(%s) negative depth at tail (%d), metadata corruption, resetting 0...",
				d.name, depth)
		} else if depth > 0 {
			d.logf(ERROR,
				"DISKQUEUE(%s) positive depth at tail (%d), data loss, resetting 0...",
				d.name, depth)
		}
		// force set depth 0
		d.depth = 0
		d.needSync = true
	}
	if d.readFileNum != d.writeFileNum || d.readPos != d.writePos {
		if d.readFileNum > d.writeFileNum {
			d.logf(ERROR,
				"DISKQUEUE(%s) readFileNum > writeFileNum (%d > %d), corruption, skipping to next writeFileNum and resetting 0...",
				d.name, d.readFileNum, d.writeFileNum)
		}
		if d.readPos > d.writePos {
			d.logf(ERROR,
				"DISKQUEUE(%s) readPos > writePos (%d > %d), corruption, skipping to next writeFileNum and resetting 0...",
				d.name, d.readPos, d.writePos)
		}
		d.skipToNextRWFile()
		d.needSync = true
	}
}
// moveForward commits a successful read: it advances the real read
// positions to the previously staged next* values (set by readOne),
// decrements depth, and removes the old data file if the read rolled
// over to a new one.
func (d *diskQueue) moveForward() {
	oldReadFileNum := d.readFileNum
	d.readFileNum = d.nextReadFileNum
	d.readPos = d.nextReadPos
	d.depth -= 1
	// see if we need to clean up the old file
	if oldReadFileNum != d.nextReadFileNum {
		// sync every time we start reading from a new file
		d.needSync = true
		fn := d.fileName(oldReadFileNum)
		err := os.Remove(fn)
		if err != nil {
			d.logf(ERROR, "DISKQUEUE(%s) failed to Remove(%s) - %s", d.name, fn, err)
		}
	}
	d.checkTailCorruption(d.depth)
}
func (d *diskQueue) handleReadError() { | |||||
// jump to the next read file and rename the current (bad) file | |||||
if d.readFileNum == d.writeFileNum { | |||||
// if you can't properly read from the current write file it's safe to | |||||
// assume that something is fucked and we should skip the current file too | |||||
if d.writeFile != nil { | |||||
d.writeFile.Close() | |||||
d.writeFile = nil | |||||
} | |||||
d.writeFileNum++ | |||||
d.writePos = 0 | |||||
} | |||||
badFn := d.fileName(d.readFileNum) | |||||
badRenameFn := badFn + ".bad" | |||||
d.logf(WARN, | |||||
"DISKQUEUE(%s) jump to next file and saving bad file as %s", | |||||
d.name, badRenameFn) | |||||
err := os.Rename(badFn, badRenameFn) | |||||
if err != nil { | |||||
d.logf(ERROR, | |||||
"DISKQUEUE(%s) failed to rename bad diskqueue file %s to %s", | |||||
d.name, badFn, badRenameFn) | |||||
} | |||||
d.readFileNum++ | |||||
d.readPos = 0 | |||||
d.nextReadFileNum = d.readFileNum | |||||
d.nextReadPos = 0 | |||||
// significant state change, schedule a sync on the next iteration | |||||
d.needSync = true | |||||
d.checkTailCorruption(d.depth) | |||||
} | |||||
// ioLoop provides the backend for exposing a go channel (via ReadChan())
// in support of multiple concurrent queue consumers
//
// it works by looping and branching based on whether or not the queue has data
// to read and blocking until data is either read or written over the appropriate
// go channels
//
// conveniently this also means that we're asynchronously reading from the filesystem
//
// All queue state (positions, depth, open files) is owned exclusively by
// this goroutine; the select below is the single synchronization point
// for reads, writes, depth queries, Empty, periodic sync, and shutdown.
func (d *diskQueue) ioLoop() {
	var dataRead []byte
	var err error
	var count int64 // writes/reads since the last sync
	var r chan []byte
	syncTicker := time.NewTicker(d.syncTimeout)
	for {
		// dont sync all the time :)
		if count == d.syncEvery {
			d.needSync = true
		}
		if d.needSync {
			err = d.sync()
			if err != nil {
				d.logf(ERROR, "DISKQUEUE(%s) failed to sync - %s", d.name, err)
			}
			count = 0
		}
		if (d.readFileNum < d.writeFileNum) || (d.readPos < d.writePos) {
			// only read a new message when the previously staged one has
			// been delivered (nextReadPos == readPos means nothing pending)
			if d.nextReadPos == d.readPos {
				dataRead, err = d.readOne()
				if err != nil {
					d.logf(ERROR, "DISKQUEUE(%s) reading at %d of %s - %s",
						d.name, d.readPos, d.fileName(d.readFileNum), err)
					d.handleReadError()
					continue
				}
			}
			r = d.readChan
		} else {
			r = nil
		}
		select {
		// the Go channel spec dictates that nil channel operations (read or write)
		// in a select are skipped, we set r to d.readChan only when there is data to read
		case r <- dataRead:
			count++
			// moveForward sets needSync flag if a file is removed
			d.moveForward()
		case d.depthChan <- d.depth:
		case <-d.emptyChan:
			d.emptyResponseChan <- d.deleteAllFiles()
			count = 0
		case dataWrite := <-d.writeChan:
			count++
			d.writeResponseChan <- d.writeOne(dataWrite)
		case <-syncTicker.C:
			if count == 0 {
				// avoid sync when there's no activity
				continue
			}
			d.needSync = true
		case <-d.exitChan:
			goto exit
		}
	}
exit:
	d.logf(INFO, "DISKQUEUE(%s): closing ... ioLoop", d.name)
	syncTicker.Stop()
	// signal exit() that this goroutine is done before it closes depthChan
	d.exitSyncChan <- 1
}
@@ -0,0 +1,19 @@ | |||||
# github.com/cespare/xxhash/v2 v2.1.2 | |||||
## explicit; go 1.11 | |||||
github.com/cespare/xxhash/v2 | |||||
# github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f | |||||
## explicit | |||||
github.com/dgryski/go-rendezvous | |||||
# github.com/go-redis/redis/v8 v8.11.4 | |||||
## explicit; go 1.13 | |||||
github.com/go-redis/redis/v8 | |||||
github.com/go-redis/redis/v8/internal | |||||
github.com/go-redis/redis/v8/internal/hashtag | |||||
github.com/go-redis/redis/v8/internal/hscan | |||||
github.com/go-redis/redis/v8/internal/pool | |||||
github.com/go-redis/redis/v8/internal/proto | |||||
github.com/go-redis/redis/v8/internal/rand | |||||
github.com/go-redis/redis/v8/internal/util | |||||
# github.com/nsqio/go-diskqueue v1.1.0 | |||||
## explicit; go 1.13 | |||||
github.com/nsqio/go-diskqueue |