背景：在一台未经过任何调优的 Linux 服务器上部署 Redis，在 Redis 启动过程中可能会碰到以下警告信息。

1363410:M 15 Jan 2026 13:07:34.879 # WARNING: The TCP backlog setting of 512 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
1363410:M 15 Jan 2026 13:07:34.879 # Server initialized
1363410:M 15 Jan 2026 13:07:34.879 # WARNING Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition. Being disabled, it can can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
1363410:M 15 Jan 2026 13:07:34.879 # WARNING You have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled (set to 'madvise' or 'never').

这些警告信息实际上是在提醒我们：操作系统的某些参数设置得不合理，需要调整，否则会影响 Redis 的性能和稳定性。除此之外，Redis 还提供了多个参数，用于在进程或连接级别进行内核参数优化，例如：

tcp-backlog：设置 TCP 服务器中 listen() 的 backlog 参数。
disable-thp：在进程级别关闭透明大页（THP）。
tcp-keepalive：在连接级别设置 TCP Keepalive 参数。
server_cpulist、bio_cpulist：可将 Redis 进程或后台 I/O 线程绑定到指定 CPU。
oom-score-adj、oom-score-adj-values：调整进程的 oom_score。oom_score 是 Linux 内核为每个进程计算的一个整数值（位于 /proc/[pid]/oom_score），分数越高，进程在内存不足时越容易被 OOM Killer 杀死。

下面我们看看这些参数的实现细节和设置建议。

tcp-backlog

createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */

tcp-backlog 用于设置 TCP 服务器在调用 listen() 时使用的 backlog 参数。该参数用于指定已完成三次握手、但尚未被 accept() 处理的连接队列长度。下面是一个典型的 TCP 服务器流程：

// 1. 创建 socket
int fd = socket(AF_INET, SOCK_STREAM, 0);
// 2. 绑定 IP 端口
bind(fd, ...);
// 3. 开始监听
listen(fd, backlog);
// 4.
接受连接
int connfd = accept(fd, ...);

当客户端发起连接时，TCP 三次握手完成后，连接会被放入已完成连接队列（accept queue），其长度由 listen(backlog) 控制。服务器通过 accept() 从队列中取走连接，交给应用层处理。当已完成连接队列已满时，新完成三次握手的连接无法进入 accept queue，内核通常会丢弃或延迟 ACK，导致客户端出现连接超时、重传。tcp-backlog 的默认值为 511，对应的内存变量为 server.tcp_backlog。

static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog) {
    // 将 socket 绑定到指定的 IP 和端口
    if (bind(s,sa,len) == -1) {
        anetSetError(err, "bind: %s", strerror(errno));
        close(s);
        return ANET_ERR;
    }
    // 将 socket 设置为监听状态，开始接收客户端连接，backlog 指定内核允许排队的最大未处理连接数
    if (listen(s, backlog) == -1) {
        anetSetError(err, "listen: %s", strerror(errno));
        close(s);
        return ANET_ERR;
    }
    return ANET_OK;
}

实例在启动时会调用 checkTcpBacklogSettings() 检查 server.tcp_backlog 的配置是否合理。

void checkTcpBacklogSettings(void) {
#if defined(HAVE_PROC_SOMAXCONN)
    FILE *fp = fopen("/proc/sys/net/core/somaxconn","r");
    char buf[1024];
    if (!fp) return;
    if (fgets(buf,sizeof(buf),fp) != NULL) {
        int somaxconn = atoi(buf);
        if (somaxconn > 0 && somaxconn < server.tcp_backlog) {
            serverLog(LL_WARNING,"WARNING: The TCP backlog setting of %d cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of %d.", server.tcp_backlog, somaxconn);
        }
    }
    fclose(fp);
    ...
#endif
}

具体实现上，它会获取 /proc/sys/net/core/somaxconn 的值。somaxconn 决定了 Linux 内核允许的单个 TCP socket 的最大 backlog 队列长度，也就是 listen() 的 backlog 参数在内核层面的上限。如果 somaxconn 的值小于 tcp-backlog，Redis 会打印如下信息，此时在 listen() 函数中实际生效的就是 somaxconn 的值。

# WARNING: The TCP backlog setting of 512 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.

设置建议：对于普通的 Web 服务（几百并发），默认值 511 已经足够。如果是高并发服务（上万并发 TCP 连接），可将 backlog 调整为 4096 甚至更高。

disable-thp

createBoolConfig("disable-thp", NULL, IMMUTABLE_CONFIG, server.disable_thp, 1, NULL, NULL),

disable-thp 用来控制是否在进程级别关闭透明大页。默认值是 1，对应的内部变量是 server.disable_thp。实例在启动时会调用 linuxMemoryWarnings，判断 Linux 系统内存相关的配置。

void linuxMemoryWarnings(void) {
    sds err_msg = NULL;
    if (checkOvercommit(&err_msg) < 0) {
        serverLog(LL_WARNING, "WARNING %s", err_msg);
        sdsfree(err_msg);
    }
    if (checkTHPEnabled(&err_msg) < 0) {
        server.thp_enabled = 1;
        if (THPDisable() == 0) {
            server.thp_enabled = 0;
        } else {
            serverLog(LL_WARNING, "WARNING %s", err_msg);
        }
        sdsfree(err_msg);
    }
}

在具体实现上，该函数首先会调用 checkOvercommit 获取 /proc/sys/vm/overcommit_memory 的值，如果 overcommit_memory 值不为 1，则会提示以下警告信息。

# WARNING Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition. Being disabled, it can can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.

在 Redis 中建议将 vm.overcommit_memory 设置为 1，否则 fork() 子进程（如 RDB、AOF rewrite、全量复制）操作在低内存时会失败。接着会调用 checkTHPEnabled 获取 /sys/kernel/mm/transparent_hugepage/enabled 的值，如果它的值为 always，则会调用 THPDisable()。该函数会基于 server.disable_thp 的值来决定是否禁用当前进程的透明大页。

#include <sys/prctl.h>

/* since linux-3.5, kernel supports to set the state of the THP disable flag
 * for the calling thread.
PR_SET_THP_DISABLE is defined in linux/prctl.h */
static int THPDisable(void) {
    int ret = -EINVAL;

    if (!server.disable_thp)
        return ret;

#ifdef PR_SET_THP_DISABLE
    ret = prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0);
#endif

    return ret;
}

如果该变量的值为 0，则不关闭；如果为 1，则会调用 prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0) 禁用当前进程的透明大页。如果操作系统开启了透明大页且 Redis 没有关闭，则会在日志中打印以下信息。

# WARNING You have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled (set to 'madvise' or 'never').

在 Redis 中建议关闭透明大页，否则 fork() 操作可能因管理大页而显著变慢，而且内存紧张时合并/拆分大页操作也会增加 Redis 响应耗时。设置建议：默认值即可。

tcp-keepalive

createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL),

tcp-keepalive 的默认值为 300，对应的内部变量是 server.tcpkeepalive。Redis 会在建立 TCP 连接后调用 anetKeepAlive 函数，通过 setsockopt 配置底层 socket 的 keepalive 参数：

int anetKeepAlive(char *err, int fd, int interval) {
    int val = 1;

    // 启用 TCP keepalive
    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1) {
        anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno));
        return ANET_ERR;
    }

    // 设置首次探测时间：空闲 interval 秒后开始发送第一个探测包
    val = interval;
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) {
        anetSetError(err, "setsockopt TCP_KEEPIDLE: %s\n", strerror(errno));
        return ANET_ERR;
    }

    // 设置探测间隔：每 interval/3 秒发送一次探测包
    val = interval/3;
    if (val == 0) val = 1;
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) {
        anetSetError(err, "setsockopt TCP_KEEPINTVL: %s\n", strerror(errno));
        return ANET_ERR;
    }

    // 设置最大探测次数：连续 3 次探测失败后判定连接已死
    val = 3;
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) {
        anetSetError(err, "setsockopt TCP_KEEPCNT: %s\n", strerror(errno));
        return ANET_ERR;
    }
    ...
    return ANET_OK;
}

下面是 Redis 中的设置与 Linux 对应参数默认值的对比（选项 → 对应内核参数 → 作用 → 代码逻辑与默认值对比）：

TCP_KEEPIDLE（net.ipv4.tcp_keepalive_time）：首次探测的等待时间，即连接空闲多久后发送第一个探测包。Redis 设为 tcp-keepalive；系统默认是 7200 秒。
TCP_KEEPINTVL（net.ipv4.tcp_keepalive_intvl）：探测间隔，即两个探测包之间的时间。Redis 设为 tcp-keepalive/3，最小为 1；系统默认是 75 秒。
TCP_KEEPCNT（net.ipv4.tcp_keepalive_probes）：探测次数，达到次数后仍无响应则判定连接死亡。Redis 固定设为 3；系统默认是 9 次。

Redis 通过在连接级别设置较短的 TCP Keepalive 参数，可以提前发现死掉或异常的客户端连接，避免长时间保留无效连接占用资源，提高 Redis 在长连接和高并发场景下的稳定性。设置建议：默认值即可。如果网络质量特别差，可设置更小的值。

xxx_cpulist

Redis 支持通过参数对不同执行单元进行 CPU 绑定，相关参数如下：

createStringConfig("server_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.server_cpulist, NULL, NULL, NULL),
createStringConfig("bio_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bio_cpulist, NULL, NULL, NULL),
createStringConfig("aof_rewrite_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.aof_rewrite_cpulist, NULL, NULL, NULL),
createStringConfig("bgsave_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bgsave_cpulist, NULL, NULL, NULL),

其中 server_cpulist、bio_cpulist、bgsave_cpulist、aof_rewrite_cpulist 分别用于绑定 Redis 主线程、BIO 后台线程、RDB 子进程和 AOF rewrite 子进程。在实现上，Redis 提供了 setcpuaffinity 函数来封装 CPU 绑定操作，该函数会根据操作系统调用对应接口。例如在 Linux 上，它会使用 sched_setaffinity 将线程绑定到指定核心。

void redisSetCpuAffinity(const char *cpulist) {
#ifdef USE_SETCPUAFFINITY
    setcpuaffinity(cpulist);
#else
    UNUSED(cpulist);
#endif
}

/* set current thread cpu affinity to cpu list, this function works like
 * taskset command (actually cpulist parsing logic reference to util-linux).
 * example of this function: "0,2,3", "0,2-3", "0-20:2". */
void setcpuaffinity(const char *cpulist) {
    ...
#ifdef __linux__
    sched_setaffinity(0, sizeof(cpuset), &cpuset);
#endif
#ifdef __FreeBSD__
    cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(cpuset), &cpuset);
#endif
#ifdef __DragonFly__
    pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
#endif
#ifdef __NetBSD__
    pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset), cpuset);
    cpuset_destroy(cpuset);
#endif
}

对于 Redis 这种单线程应用（主线程只能在一个 CPU 核心上处理请求）来说，进行 CPU 绑定的好处显而易见：减少 CPU 上下文切换、提高 CPU Cache 命中率、避免受到其他高负载任务的干扰。唯一的不足是增加了运维的配置和管理成本。

oom-score-adj、oom-score-adj-values

createEnumConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, oom_score_adj_enum, server.oom_score_adj, OOM_SCORE_ADJ_NO, NULL, updateOOMScoreAdj),
createSpecialConfig("oom-score-adj-values", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigOOMScoreAdjValuesOption, getConfigOOMScoreAdjValuesOption, rewriteConfigOOMScoreAdjValuesOption, updateOOMScoreAdj),

其中：

oom-score-adj：是否启用 OOM 调整。可选值有：no：禁用（默认值）。yes / relative：启用相对模式，在系统原始 OOM 值的基础上叠加。absolute：启用绝对模式，直接使用配置值，不考虑系统原始值。
oom-score-adj-values：针对不同角色设置 OOM 权重。该参数需要配置三个值，分别对应主节点、从节点、后台子进程（BGCHILD，用于 RDB / AOF / 复制）。默认值为 "0 200 800"。数值越大，表示在系统内存紧张时越容易被 OOM Killer 杀死。

无论是修改 oom-score-adj 还是 oom-score-adj-values，都会调用 updateOOMScoreAdj。该函数会根据进程角色（主节点、从节点或后台子进程）为当前进程设置对应的 oom_score_adj 值。

static int updateOOMScoreAdj(const char **err) {
    if (setOOMScoreAdj(-1) == C_ERR) {
        *err = "Failed to set current oom_score_adj. Check server logs.";
        return 0;
    }
    return 1;
}

// 根据进程类别设置 Redis 的 oom_score_adj。process_class: 进程类别，-1 表示自动选择
int setOOMScoreAdj(int process_class) {
    // 如果传入 -1，自动根据角色选择 master 或 replica
    if (process_class == -1)
        process_class = (server.masterhost ? CONFIG_OOM_REPLICA : CONFIG_OOM_MASTER);
    serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT);

#ifdef HAVE_PROC_OOM_SCORE_ADJ
    // oom_score_adjusted_by_redis：标记 Redis 是否已修改过 OOM 分数。
    // oom_score_adj_base：保存原始 OOM 分数，以便回滚或禁用时恢复。
    static int oom_score_adjusted_by_redis = 0;
    static int oom_score_adj_base = 0;
    int fd;
    int val;
    char buf[64];

    // oom_score_adj 不为 no
    if (server.oom_score_adj !=
OOM_SCORE_ADJ_NO) {
        // 第一次修改时备份原始 oom_score_adj
        if (!oom_score_adjusted_by_redis) {
            oom_score_adjusted_by_redis = 1;
            /* Backup base value before enabling Redis control over oom score */
            fd = open("/proc/self/oom_score_adj", O_RDONLY);
            if (fd < 0 || read(fd, buf, sizeof(buf)) < 0) {
                serverLog(LL_WARNING, "Unable to read oom_score_adj: %s", strerror(errno));
                if (fd != -1) close(fd);
                return C_ERR;
            }
            oom_score_adj_base = atoi(buf);
            close(fd);
        }

        // 获取配置的 OOM 分数
        val = server.oom_score_adj_values[process_class];
        // 如果是相对模式，累加原始值
        if (server.oom_score_adj == OOM_SCORE_RELATIVE)
            val += oom_score_adj_base;
        // 限制值在 [-1000, 1000]
        if (val > 1000) val = 1000;
        if (val < -1000) val = -1000;
    } else if (oom_score_adjusted_by_redis) {
        // 如果配置禁用 OOM 调整且之前已修改过，恢复原始值
        oom_score_adjusted_by_redis = 0;
        val = oom_score_adj_base;
    } else {
        return C_OK;
    }

    snprintf(buf, sizeof(buf) - 1, "%d\n", val);

    // 写入 /proc/self/oom_score_adj，使内核生效
    fd = open("/proc/self/oom_score_adj", O_WRONLY);
    if (fd < 0 || write(fd, buf, strlen(buf)) < 0) {
        serverLog(LL_WARNING, "Unable to write oom_score_adj: %s", strerror(errno));
        if (fd != -1) close(fd);
        return C_ERR;
    }
    close(fd);
    return C_OK;
#else
    /* Unsupported */
    return C_ERR;
#endif
}

具体实现上，系统的原始 OOM 值通过读取 /proc/self/oom_score_adj 获取。在第一次启用 OOM 调整时，Redis 会将该原始值保存到 oom_score_adj_base 中，以便在后续禁用配置或发生配置回滚时，能够将进程的 OOM 值恢复为调整前的状态。无论是相对模式还是绝对模式，最终生效的 OOM 值都会写回到 /proc/self/oom_score_adj。