Redis 的理论性能:
| 指标 | 数值 |
|---|---|
| QPS(简单命令) | 10W+/s |
| QPS(复杂命令) | 1W+/s |
| 读写延迟 | 微秒级(<1ms) |
| 网络带宽 | 可达 1GB/s |
redis-benchmark -h 127.0.0.1 -p 6379 -c 100 -n 100000
redis-benchmark -h 127.0.0.1 -p 6379 -t set -c 100 -n 100000 -d 100
redis-benchmark -h 127.0.0.1 -p 6379 -t set -c 100 -n 100000 -P 16
redis-benchmark -h 127.0.0.1 -p 6379 --eval "return redis.call('set','key','value')"
KEYS *
FLUSHALL
FLUSHDB
CONFIG
rename-command KEYS ""
rename-command FLUSHALL ""
rename-command FLUSHDB ""
rename-command CONFIG "CONFIG_b840fc02"
SCAN 0 MATCH "user:*" COUNT 100
/**
 * Incrementally enumerates keys matching {@code pattern} via SCAN, which
 * (unlike KEYS) does not block the server on large keyspaces.
 * SCAN may return duplicates across iterations; the Set deduplicates them.
 *
 * @param pattern glob-style match pattern, e.g. "user:*"
 * @return the set of matching key names
 */
public Set<String> scanKeys(String pattern) {
    Set<String> keys = new HashSet<>();
    ScanOptions options = ScanOptions.scanOptions()
            .match(pattern)
            .count(100) // hint only: keys examined per server-side iteration
            .build();
    // Fix: the original obtained a connection from getConnection() but never
    // closed it, leaking pooled connections. Close BOTH connection and cursor.
    try (var connection = redisTemplate.getConnectionFactory().getConnection();
         var cursor = connection.scan(options)) {
        while (cursor.hasNext()) {
            // NOTE(review): new String(byte[]) uses the platform default
            // charset; StandardCharsets.UTF_8 would be explicit — confirm
            // the key encoding used by writers.
            keys.add(new String(cursor.next()));
        }
    }
    return keys;
}
// Anti-pattern: 100 individual SET commands — one network round trip each.
for (int i = 0; i < 100; i++) {
redisTemplate.opsForValue().set("key:" + i, "value:" + i);
}
// Better: pipeline the same 100 SETs into a single round trip.
// The callback must return null; executePipelined collects the replies itself.
redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
for (int i = 0; i < 100; i++) {
// NOTE(review): getBytes() uses the platform default charset;
// getBytes(StandardCharsets.UTF_8) would be explicit — confirm intent.
connection.set(("key:" + i).getBytes(), ("value:" + i).getBytes());
}
return null;
});
// Atomic compare-and-set via a server-side Lua script: set KEYS[1] to ARGV[2]
// only if its current value equals ARGV[1]. Redis executes a script as a
// single atomic unit, so no other client can race between the GET and SET.
String luaScript =
"local current = redis.call('get', KEYS[1]) " +
"if current == ARGV[1] then " +
" redis.call('set', KEYS[1], ARGV[2]) " +
" return 1 " +
"else " +
" return 0 " +
"end";
DefaultRedisScript<Long> script = new DefaultRedisScript<>();
script.setScriptText(luaScript);
script.setResultType(Long.class);
// result is 1 on a successful swap, 0 if the current value did not match.
Long result = redisTemplate.execute(script,
Collections.singletonList("key"),
"expectedValue", "newValue");
SET user:1001 '{"name":"张三","age":25,"email":"test@example.com"}'
HSET user:1001 name "张三" age 25 email "test@example.com"
GET counter
SET counter (value + 1)
INCR counter
ZADD ranking 100 user1 200 user2 150 user3
ZREVRANGE ranking 0 9 WITHSCORES
user:1001:info
product:2001:stock
very_very_very_long_key_name_for_user_information_storage_12345
redis-cli --bigkeys
redis-cli --memkeys
rdb --command memory dump.rdb --bytes 10240
# 配置压缩列表阈值
# Hash
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# List(Redis 3.2+ 使用 quicklist)
list-max-ziplist-size -2
list-compress-depth 0
# Set(只对整数有效)
set-max-intset-entries 512
# Sorted Set
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
/**
 * Stores a value with a TTL plus a random jitter of up to 10% of the base TTL,
 * so keys written together do not all expire at the same instant
 * (cache-avalanche protection).
 *
 * @param key           cache key
 * @param value         value to store
 * @param expireSeconds base TTL in seconds; must be positive
 */
public void setWithExpire(String key, Object value, long expireSeconds) {
    // Bug fix: ThreadLocalRandom.nextLong(origin, bound) requires origin < bound,
    // so for expireSeconds < 10 the original (bound = expireSeconds / 10 == 0)
    // threw IllegalArgumentException. Clamp the bound to at least 1.
    long jitterBound = Math.max(1L, expireSeconds / 10);
    long randomOffset = ThreadLocalRandom.current().nextLong(0, jitterBound);
    redisTemplate.opsForValue().set(key, value,
            expireSeconds + randomOffset, TimeUnit.SECONDS);
}
CONFIG GET maxmemory-policy
maxmemory-policy allkeys-lru
INFO stats | grep expired_keys
/**
 * Lettuce connection-pool configuration for a standalone Redis server.
 */
@Configuration
public class RedisConfig {

    /**
     * Builds a pooled Lettuce connection factory.
     * Pool sizing (maxTotal/maxIdle/minIdle) should be tuned to the
     * application's actual concurrency; the values here are sane defaults.
     */
    @Bean
    public LettuceConnectionFactory redisConnectionFactory() {
        GenericObjectPoolConfig<Object> poolConfig = new GenericObjectPoolConfig<>();
        poolConfig.setMaxTotal(100); // hard cap on pooled connections
        poolConfig.setMaxIdle(50);
        poolConfig.setMinIdle(10);   // keep warm connections ready
        // Fix: setMaxWaitMillis(long) is deprecated in commons-pool2;
        // use the Duration overload instead.
        poolConfig.setMaxWait(Duration.ofMillis(3000));
        // testOnBorrow adds a validation round trip per checkout; kept to
        // fail fast on dead connections, with background idle validation too.
        poolConfig.setTestOnBorrow(true);
        poolConfig.setTestWhileIdle(true);

        LettucePoolingClientConfiguration clientConfig = LettucePoolingClientConfiguration.builder()
                .poolConfig(poolConfig)
                .commandTimeout(Duration.ofSeconds(3))
                .build();

        RedisStandaloneConfiguration serverConfig = new RedisStandaloneConfiguration();
        serverConfig.setHostName("localhost");
        serverConfig.setPort(6379);
        // NOTE(review): do not hard-code credentials in real deployments —
        // load them from external configuration or a secret manager.
        serverConfig.setPassword(RedisPassword.of("password"));

        return new LettuceConnectionFactory(serverConfig, clientConfig);
    }
}
# Redis 配置
tcp-backlog 511 # TCP 连接队列长度
tcp-keepalive 300 # TCP keepalive
# Linux 系统配置
# /etc/sysctl.conf
net.core.somaxconn = 1024
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 300
// Intended as an "L1 (Caffeine, 5 min) + L2 (Redis, 1 h)" cache setup.
//
// NOTE(review): CompositeCacheManager does NOT layer caches — it asks each
// delegate in order and uses the FIRST one that knows the cache name.
// Because CaffeineCacheManager creates caches on demand by default, it will
// always claim the name, so the Redis manager may never be consulted.
// A true two-level cache requires a custom Cache implementation that reads
// through L1 to L2 — confirm the intended semantics here.
@Configuration
public class CacheConfig {
@Bean
public CacheManager cacheManager(LettuceConnectionFactory connectionFactory) {
// L1: in-process Caffeine cache, max 10k entries, 5-minute TTL.
CaffeineCacheManager localCacheManager = new CaffeineCacheManager();
localCacheManager.setCaffeine(Caffeine.newBuilder()
.maximumSize(10000)
.expireAfterWrite(5, TimeUnit.MINUTES));
// L2: shared Redis cache with a 1-hour default TTL.
RedisCacheManager redisCacheManager = RedisCacheManager.builder(connectionFactory)
.cacheDefaults(RedisCacheConfiguration.defaultCacheConfig()
.entryTtl(Duration.ofHours(1)))
.build();
return new CompositeCacheManager(localCacheManager, redisCacheManager);
}
}
# 调整自动保存频率
save 900 1
save 300 100
save 60 10000
# 禁用 RDB(如果使用 AOF)
# save ""
# 后台保存失败时停止写入
stop-writes-on-bgsave-error yes
# RDB 文件压缩
rdbcompression yes
# RDB 文件校验
rdbchecksum yes
# 开启 AOF
appendonly yes
# 同步策略
appendfsync everysec
# 重写期间是否暂停 fsync:设为 yes 可提高性能但可能丢数据;此处为 no,重写期间仍正常 fsync
no-appendfsync-on-rewrite no
# 自动重写条件
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# 使用混合持久化(Redis 4.0+)
aof-use-rdb-preamble yes
redis-cli INFO stats | grep latest_fork_usec
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
sysctl -p
maxmemory 4gb
redis-cli INFO
redis-cli INFO server
redis-cli INFO clients
redis-cli INFO memory
redis-cli INFO persistence
redis-cli INFO stats
redis-cli INFO replication
redis-cli INFO cpu
redis-cli INFO cluster
redis-cli INFO keyspace
used_memory
used_memory_rss
mem_fragmentation_ratio
maxmemory
evicted_keys
connected_clients
blocked_clients
rejected_connections
instantaneous_ops_per_sec
hit_rate = keyspace_hits / (keyspace_hits + keyspace_misses)
rdb_last_bgsave_status
aof_last_rewrite_status
aof_current_size
master_link_status
master_last_io_seconds_ago
slowlog-log-slower-than 10000
slowlog-max-len 128
SLOWLOG GET 10
SLOWLOG LEN
SLOWLOG RESET
version: "3"
services:
redis-exporter:
image: oliver006/redis_exporter
ports:
- "9121:9121"
environment:
- REDIS_ADDR=redis://localhost:6379
- REDIS_PASSWORD=your_password
prometheus:
image: prom/prometheus
ports:
- "9090:9090"
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
grafana:
image: grafana/grafana
ports:
- "3000:3000"
scrape_configs:
- job_name: "redis"
static_configs:
- targets: ["redis-exporter:9121"]
groups:
- name: redis
rules:
- alert: RedisMemoryHigh
expr: redis_memory_used_bytes / redis_memory_max_bytes > 0.8
for: 5m
labels:
severity: warning
annotations:
summary: "Redis memory usage is high"
- alert: RedisTooManyConnections
expr: redis_connected_clients > 500
for: 5m
labels:
severity: warning
- alert: RedisHitRateLow
expr: redis_keyspace_hits / (redis_keyspace_hits + redis_keyspace_misses) < 0.9
for: 10m
labels:
severity: warning
- alert: RedisReplicationLag
expr: redis_connected_slaves < 1 or redis_master_link_up == 0
for: 1m
labels:
severity: critical
tail -f /var/log/redis/redis.log
iptables -L -n | grep 6379
grep bind /etc/redis/redis.conf
grep protected-mode /etc/redis/redis.conf
redis-cli SLOWLOG GET 10
redis-cli --bigkeys
redis-cli CLIENT LIST | grep idle
redis-cli INFO memory
redis-cli INFO stats | grep evicted
redis-cli MEMORY DOCTOR
redis-cli CLIENT LIST
redis-cli INFO clients
redis-cli CLIENT KILL IDLE 3600
timeout 300
redis-cli MEMORY STATS
redis-cli MEMORY USAGE key [SAMPLES count]
redis-cli MEMORY DOCTOR
redis-cli --bigkeys
redis-cli --memkeys
rdb --command memory dump.rdb --bytes 10240 > memory-report.csv
redis-cli MONITOR
redis-cli INFO commandstats
redis-cli --latency
redis-cli --latency-history
redis-cli --latency-dist
redis-cli --intrinsic-latency 5
redis-cli INFO persistence
vm.overcommit_memory = 1
vm.swappiness = 1
net.core.somaxconn = 1024
net.ipv4.tcp_max_syn_backlog = 1024
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
* soft nofile 65535
* hard nofile 65535
useradd -r -s /sbin/nologin redis
chown -R redis:redis /data/redis
#!/bin/bash
# Redis RDB backup: trigger BGSAVE, wait for it to finish, then archive the
# dump file and prune backups older than 7 days.
set -euo pipefail

BACKUP_DIR="/backup/redis"
DATE=$(date +%Y%m%d_%H%M%S)
REDIS_DATA_DIR="/data/redis"

mkdir -p "$BACKUP_DIR"

# Fix: record LASTSAVE *before* triggering BGSAVE and poll until it changes.
# The original compared against a /tmp state file that does not exist on the
# first run ("[ x == ]" is a test syntax error, ending the loop immediately)
# and could be stale if another process had saved in between.
LAST_SAVE=$(redis-cli -a password LASTSAVE)
redis-cli -a password BGSAVE
while [ "$(redis-cli -a password LASTSAVE)" == "$LAST_SAVE" ]; do
    sleep 1
done

# Copy the freshly written dump, compress it, and prune old archives.
cp "$REDIS_DATA_DIR/dump.rdb" "$BACKUP_DIR/dump_$DATE.rdb"
gzip "$BACKUP_DIR/dump_$DATE.rdb"
find "$BACKUP_DIR" -name "*.gz" -mtime +7 -delete

echo "Backup completed: dump_$DATE.rdb.gz"
# 1. 绑定内网 IP
bind 192.168.1.10
# 2. 设置密码
requirepass your_strong_password_here
# 3. 禁用危险命令
rename-command FLUSHALL ""
rename-command FLUSHDB ""
rename-command CONFIG "CONFIG_secret_key"
rename-command KEYS ""
rename-command DEBUG ""
rename-command SHUTDOWN "SHUTDOWN_secret_key"
# 4. 开启保护模式
protected-mode yes
# 5. 禁止外网访问(防火墙)
# iptables -A INPUT -p tcp --dport 6379 -s 192.168.1.0/24 -j ACCEPT
# iptables -A INPUT -p tcp --dport 6379 -j DROP
# 6. 使用 TLS(Redis 6.0+)
tls-port 6380
tls-cert-file /path/to/redis.crt
tls-key-file /path/to/redis.key
tls-ca-cert-file /path/to/ca.crt
redis-cli PING
redis-cli CONFIG GET *
redis-cli CONFIG GET maxmemory
redis-cli CONFIG SET maxmemory 4gb
redis-cli CONFIG SET maxmemory-policy allkeys-lru
redis-cli CONFIG REWRITE
redis-cli CLIENT LIST | wc -l
redis-cli CLIENT KILL IDLE 3600
redis-cli INFO memory | head -20
redis-cli DBSIZE
redis-cli SLOWLOG GET 10
redis-cli BGSAVE
redis-cli BGREWRITEAOF
| 配置项 | 推荐值 | 说明 |
|---|---|---|
| maxmemory | 物理内存的 50-70% | 预留给 fork 和系统 |
| maxmemory-policy | allkeys-lru | 根据场景选择 |
| maxclients | 10000 | 根据业务调整 |
| timeout | 300 | 空闲连接超时 |
| tcp-keepalive | 300 | TCP 保活 |
| tcp-backlog | 511 | TCP 队列 |
| appendonly | yes | 开启 AOF |
| appendfsync | everysec | 每秒同步 |
| aof-use-rdb-preamble | yes | 混合持久化 |
| slowlog-log-slower-than | 10000 | 10ms |
| slowlog-max-len | 128 | 保留条数 |
| 规范 | 说明 |
|---|---|
| Key 命名 | 业务:类型:ID,不超过 44 字节 |
| Value 大小 | String 不超过 10KB |
| 集合元素 | Hash/List/Set/ZSet 元素不超过 5000 |
| 过期时间 | 所有缓存必须设置过期时间 |
| 批量操作 | 使用 Pipeline 或 Lua 脚本 |
| 禁用命令 | KEYS、FLUSHALL、FLUSHDB |
| 连接池 | 使用连接池,配置合理的参数 |
| 指标 | 告警阈值 | 说明 |
|---|---|---|
| 内存使用率 | > 80% | 及时扩容或清理 |
| 连接数 | > 80% maxclients | 检查连接泄漏 |
| 命中率 | < 90% | 优化缓存策略 |
| 淘汰 key 数 | > 0 | 内存不足 |
| 慢查询数 | > 100/min | 优化命令 |
| 主从延迟 | > 10s | 检查网络和负载 |
| fork 耗时 | > 500ms | 优化持久化配置 |
- 命令优化:避免慢命令,使用 Pipeline 和 Lua
- 内存优化:控制 Key/Value 大小,使用压缩列表
- 网络优化:使用连接池,合理配置连接参数
- 持久化优化:根据场景选择 RDB/AOF,使用混合持久化
- 监控全面:内存、连接、性能、持久化
- 告警及时:设置合理的告警阈值
- 备份可靠:定期备份,异地存储
- 安全加固:密码、防火墙、禁用危险命令
- 连接问题:检查网络、连接数、超时配置
- 内存问题:检查大 key、淘汰策略、碎片
- 性能问题:检查慢查询、持久化、CPU 使用
- 数据问题:检查持久化状态、主从同步