[cache] optimize Redis implementation

This commit is contained in:
Aleksandr Soloshenko 2025-09-19 06:33:29 +07:00 committed by Aleksandr Soloshenko
parent a74d41f078
commit 6fad3c1e2d
3 changed files with 29 additions and 19 deletions

View File

@@ -15,11 +15,11 @@ const (
type Cache = cache.Cache type Cache = cache.Cache
type Factory interface { type Factory interface {
New(name string) (cache.Cache, error) New(name string) (Cache, error)
} }
type factory struct { type factory struct {
new func(name string) (cache.Cache, error) new func(name string) (Cache, error)
} }
func NewFactory(config Config) (Factory, error) { func NewFactory(config Config) (Factory, error) {
@@ -35,7 +35,7 @@ func NewFactory(config Config) (Factory, error) {
switch u.Scheme { switch u.Scheme {
case "memory": case "memory":
return &factory{ return &factory{
new: func(name string) (cache.Cache, error) { new: func(name string) (Cache, error) {
return cache.NewMemory(0), nil return cache.NewMemory(0), nil
}, },
}, nil }, nil
@@ -45,7 +45,7 @@ func NewFactory(config Config) (Factory, error) {
return nil, fmt.Errorf("can't create redis client: %w", err) return nil, fmt.Errorf("can't create redis client: %w", err)
} }
return &factory{ return &factory{
new: func(name string) (cache.Cache, error) { new: func(name string) (Cache, error) {
return cache.NewRedis(client, name, 0), nil return cache.NewRedis(client, name, 0), nil
}, },
}, nil }, nil
@@ -55,6 +55,6 @@ func NewFactory(config Config) (Factory, error) {
} }
// New implements Factory. // New implements Factory.
func (f *factory) New(name string) (cache.Cache, error) { func (f *factory) New(name string) (Cache, error) {
return f.new(keyPrefix + name) return f.new(keyPrefix + name)
} }

View File

@@ -179,6 +179,7 @@ func BenchmarkMemoryCache_ConcurrentReads(b *testing.B) {
for _, bm := range benchmarks { for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) { b.Run(bm.name, func(b *testing.B) {
b.SetParallelism(bm.goroutines)
b.ResetTimer() b.ResetTimer()
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {
@@ -206,6 +207,7 @@ func BenchmarkMemoryCache_ConcurrentWrites(b *testing.B) {
for _, bm := range benchmarks { for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) { b.Run(bm.name, func(b *testing.B) {
b.SetParallelism(bm.goroutines)
b.ResetTimer() b.ResetTimer()
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
i := 0 i := 0
@@ -238,6 +240,7 @@ func BenchmarkMemoryCache_MixedWorkload(b *testing.B) {
for _, bm := range benchmarks { for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) { b.Run(bm.name, func(b *testing.B) {
b.SetParallelism(bm.goroutines)
b.ResetTimer() b.ResetTimer()
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
r := rand.New(rand.NewSource(time.Now().UnixNano())) r := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -287,6 +290,7 @@ func BenchmarkMemoryCache_Scaling(b *testing.B) {
cache.Set(ctx, key, value) cache.Set(ctx, key, value)
} }
b.SetParallelism(bm.goroutines)
b.ResetTimer() b.ResetTimer()
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
localI := 0 localI := 0
@@ -355,12 +359,17 @@ func BenchmarkMemoryCache_LargeValues(b *testing.B) {
for i := range value { for i := range value {
value[i] = byte(i % 256) value[i] = byte(i % 256)
} }
valueStr := string(value)
b.ResetTimer() b.ResetTimer()
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {
cache.Set(ctx, key, string(value)) if err := cache.Set(ctx, key, valueStr); err != nil {
cache.Get(ctx, key) b.Fatal(err)
}
if _, err := cache.Get(ctx, key); err != nil {
b.Fatal(err)
}
} }
}) })
}) })
@@ -371,6 +380,7 @@ func BenchmarkMemoryCache_LargeValues(b *testing.B) {
func BenchmarkMemoryCache_MemoryGrowth(b *testing.B) { func BenchmarkMemoryCache_MemoryGrowth(b *testing.B) {
cache := cache.NewMemory(0) cache := cache.NewMemory(0)
ctx := context.Background() ctx := context.Background()
b.ReportAllocs()
sizes := []int{100, 1000, 10000, 100000} sizes := []int{100, 1000, 10000, 100000}
@@ -378,12 +388,12 @@ func BenchmarkMemoryCache_MemoryGrowth(b *testing.B) {
b.Run(fmt.Sprintf("%d_items", size), func(b *testing.B) { b.Run(fmt.Sprintf("%d_items", size), func(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for b.Loop() {
// Clear cache // Clear cache
cache.Drain(ctx) cache.Drain(ctx)
// Add new items // Add new items
for j := 0; j < size; j++ { for j := range size {
key := "key-" + strconv.Itoa(j) key := "key-" + strconv.Itoa(j)
value := "value-" + strconv.Itoa(j) value := "value-" + strconv.Itoa(j)
cache.Set(ctx, key, value) cache.Set(ctx, key, value)

20
pkg/cache/redis.go vendored
View File

@@ -26,9 +26,8 @@ end
hgetallAndDeleteScript = ` hgetallAndDeleteScript = `
local items = redis.call('HGETALL', KEYS[1]) local items = redis.call('HGETALL', KEYS[1])
if #items > 0 then if #items > 0 then
for i = 1, #items, 2 do local ok = pcall(redis.call, 'UNLINK', KEYS[1])
redis.call('HDEL', KEYS[1], items[i]) if not ok then redis.call('DEL', KEYS[1]) end
end
end end
return items return items
` `
@@ -128,14 +127,15 @@ func (r *redisCache) Set(ctx context.Context, key string, value string, opts ...
} }
options.apply(opts...) options.apply(opts...)
if err := r.client.HSet(ctx, r.key, key, value).Err(); err != nil { _, err := r.client.Pipelined(ctx, func(p redis.Pipeliner) error {
return fmt.Errorf("can't set cache item: %w", err) p.HSet(ctx, r.key, key, value)
} if !options.validUntil.IsZero() {
p.HExpireAt(ctx, r.key, options.validUntil, key)
if !options.validUntil.IsZero() {
if err := r.client.HExpireAt(ctx, r.key, options.validUntil, key).Err(); err != nil {
return fmt.Errorf("can't set cache item ttl: %w", err)
} }
return nil
})
if err != nil {
return fmt.Errorf("can't set cache item: %w", err)
} }
return nil return nil