Optimizing a TCP Server

Published on 2020-03-12 15:56:28

TCP Parameter Tuning

Copy the block below into /etc/sysctl.conf and then run sysctl -p. It uses the BBR congestion control algorithm.

Each of these parameters is documented at https://sysctl-explorer.net/net/.
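Before applying the settings, it is worth confirming that the running kernel actually supports BBR. A minimal sketch, assuming a 4.9+ kernel and root privileges (tcp_bbr is the module name in mainline kernels):

# Kernel must be 4.9 or newer for BBR
$ uname -r

# Load the BBR module if it is not built in
$ sudo modprobe tcp_bbr

# "bbr" should now appear in the list of available algorithms
$ sysctl net.ipv4.tcp_available_congestion_control

# Apply the configuration once it is in /etc/sysctl.conf
$ sudo sysctl -p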

# TCP server optimization /etc/sysctl.conf
# 1.https://github.com/shadowsocks/shadowsocks/wiki/Optimizing-Shadowsocks
# 2.https://www.vultr.com/docs/how-to-setup-tcp-optimization-on-linux
# 3.https://wiki.archlinux.org/index.php/sysctl#Improving_performance
# 4.https://sysctl-explorer.net/net/
# $ sysctl --system
# On older systems:
# $ sysctl -p /etc/sysctl.conf (this file is the default, optional argument)

# Show your congestion control status:
# $ sysctl net.ipv4.tcp_available_congestion_control
# $ sysctl net.ipv4.tcp_congestion_control

# If you use Supervisor, make sure you have the following line in /etc/default/supervisor:
# ulimit -n 51200
# Once you have added that line, restart Supervisor (service supervisor stop && service supervisor start).
# If you run shadowsocks in the background in other ways, make sure to add `ulimit -n 51200` in your init script.
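#
# To verify the limit actually applied to the running process
# (<PID> below is a placeholder for your server's PID):
# $ grep 'open files' /proc/<PID>/limits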

#################################################

# max open files
fs.file-max = 51200

# Port range used for outgoing connections. The default range is small: 32768 to 61000; widen it to 10000 to 65000
net.ipv4.ip_local_port_range = 10000 65000

# For high-latency links use hybla, for low-latency use cubic; or use BBR below (requires kernel 4.9+)
# net.ipv4.tcp_congestion_control=hybla

# FOR GOOGLE BBR
net.core.default_qdisc=fq
net.ipv4.tcp_congestion_control=bbr

# shadowguard
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1

# Increase the maximum listen() backlog (accept queue), default 128
net.core.somaxconn = 65535
# Additionally, on kernels newer than 3.7.1 we can enable tcp_fastopen:
net.ipv4.tcp_fastopen = 3

# Sets whether TCP should start at the default window size only for new connections or also for existing connections that have been idle for too long.
# This setting kills persistent single connection performance and could be turned off
net.ipv4.tcp_slow_start_after_idle = 0

# The received frames will be stored in this queue after taking them from the ring buffer on the network card
net.core.netdev_max_backlog = 100000
# netdev_budget: maximum number of packets processed from all interfaces in one NAPI polling cycle;
# netdev_budget_usecs: maximum time in microseconds for one polling cycle
net.core.netdev_budget = 50000
net.core.netdev_budget_usecs = 5000

# Length of the SYN backlog queue; the default is 1024
net.ipv4.tcp_max_syn_backlog = 30000
# Enable SYN cookies: when the SYN backlog queue overflows, cookies are used to handle connections, which defends against small-scale SYN flood attacks
net.ipv4.tcp_syncookies = 1

# TIME-WAIT reuse/recycle actually violate the TCP specification; if server resources allow and the load is not heavy, it is best to leave them off
# Allow TIME-WAIT sockets to be reused for new TCP connections; default is 0 (off)
net.ipv4.tcp_tw_reuse = 0
# Warning: DO NOT ENABLE net.ipv4.tcp_tw_recycle!!!
# Enable fast recycling of TIME-WAIT sockets; default is 0 (off)
net.ipv4.tcp_tw_recycle = 0
# Maximum number of TIME_WAIT sockets the system keeps at the same time
net.ipv4.tcp_max_tw_buckets = 2000000

# How often TCP sends keepalive probes when keepalive is enabled; the default is 2 hours.
# With the following settings, your application will detect dead TCP connections after 120 seconds (60s + 10s + 10s + 10s + 10s + 10s + 10s).
net.ipv4.tcp_keepalive_time = 60
net.ipv4.tcp_keepalive_intvl = 10
net.ipv4.tcp_keepalive_probes = 6

# Change the system default timeout for moving from FIN_WAIT_2 to TIME_WAIT
net.ipv4.tcp_fin_timeout = 30

# Enable Path MTU probing once an ICMP black hole is detected (1); 2 would enable it always
net.ipv4.tcp_mtu_probing=1
# increase TCP max buffer size settable using setsockopt()
net.core.rmem_max = 67108864
net.core.wmem_max = 67108864
net.core.rmem_default = 67108864
net.core.wmem_default = 67108864
# tcp_rmem and tcp_wmem are per socket in bytes.
net.ipv4.tcp_rmem = 4096 1048576 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864
# tcp_mem is for all TCP streams, in 4096-byte pages.
# This server might have 2048 clients simultaneously, so:
#   max(tcp_wmem) * 2 * 2048 / 4096
net.ipv4.tcp_mem = 67108864 67108864 67108864

# UDP
net.ipv4.udp_rmem_min = 8192
net.ipv4.udp_wmem_min = 8192

# TCP SACK (TCP Selective Acknowledgement),
# DSACK (duplicate TCP SACK), and FACK (Forward Acknowledgement)
net.ipv4.tcp_sack = 1
net.ipv4.tcp_dsack = 1
net.ipv4.tcp_fack = 1
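After reloading with sysctl -p (or sysctl --system), the values actually in effect can be spot-checked. A minimal sketch; ss is part of iproute2 and should be available on most distributions:

# Confirm BBR and fq are active
$ sysctl net.ipv4.tcp_congestion_control net.core.default_qdisc

# Confirm the larger accept queue and TCP Fast Open setting took effect
$ sysctl net.core.somaxconn net.ipv4.tcp_fastopen

# For LISTEN sockets, Send-Q shows the accept-queue limit granted to listen()
$ ss -ltn

# -o shows per-socket timers, including keepalive, on established connections
$ ss -tno state established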

Measuring the Effect of the Tuning

Network throughput can be tested with iperf, which uses port 5001 by default.

# Server
$ iperf -s

# Client
$ iperf -c $yourhost -i 2 -t 30
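For a more telling before/after comparison, it can also help to run several parallel streams and a UDP test (the 100M bandwidth target below is only an example figure):

# Client: 4 parallel TCP streams for 30 seconds
$ iperf -c $yourhost -P 4 -t 30

# UDP test: start the server with `iperf -s -u`, then target 100 Mbit/s
$ iperf -c $yourhost -u -b 100M -t 30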

On Choosing a TCP Congestion Control Algorithm
