多并发网络优化 淘汰-第一版学习使用 第一个优化方式 先创建一个optimization.sh文件nano optimization.sh 在编辑器中输入
#!/bin/bash
# Ensure the tools this script depends on (ethtool, nmcli) are installed.
# The original had all commands fused onto one line with no separators,
# which is a bash syntax error; restored to proper multiline form.
if ! command -v ethtool &> /dev/null || ! command -v nmcli &> /dev/null; then
    echo "ethtool或nmcli未安装,正在安装..."
    sudo apt update
    sudo apt install -y ethtool network-manager
    echo "ethtool和nmcli已安装"
else
    echo "ethtool和nmcli已安装"
fi
# Collect device names from nmcli, dropping the header row.
interfaces=$(nmcli device status | awk '{print $1}' | grep -v DEVICE)

for interface in $interfaces; do
    # Enlarge RX/TX ring buffers to reduce drops under load.
    # Fixed: the original used `nmcli connection modify ... txqueuelen/rxqueuelen`,
    # which are not nmcli connection properties; ring buffers are set with
    # `ethtool -G`. Not every (virtual) NIC supports it, so failure is non-fatal.
    echo "Setting ring buffer size for interface $interface..."
    sudo ethtool -G "$interface" rx 4096 tx 4096 2>/dev/null || true

    # Per-device backlog does not exist; the kernel-wide equivalent
    # (net.core.netdev_max_backlog) is applied later via sysctl.
    echo "Tuning network device backlog for interface $interface..."

    # Increase the NIC transmit queue length. txqueuelen is a link-level
    # attribute configured through `ip link`, not nmcli.
    echo "Increasing NIC transmission queue length for interface $interface..."
    sudo ip link set dev "$interface" txqueuelen 10000
done
# On KVM guests, TSO/GSO segmentation offload can hurt virtio throughput;
# disable both when the DMI product name reports KVM.
# The original was fused onto one line with no separators (syntax error).
if [ "$(sudo dmidecode -s system-product-name)" == "KVM" ]; then
    echo "系统虚拟化类型为 KVM,正在关闭 TSO 和 GSO..."
    for interface in $(nmcli device status | awk '{print $1}' | grep -v DEVICE); do
        sudo ethtool -K "$interface" tso off gso off
        echo "TSO 和 GSO 已关闭于接口 $interface"
    done
else
    echo "系统虚拟化类型非 KVM,不需要关闭 TSO 和 GSO。"
fi
# Back up sysctl.conf so the tuning appended below can be rolled back.
cp /etc/sysctl.conf /etc/sysctl.conf.bak
# Raise the per-process open-file limit for high-concurrency workloads.
# Fixed: the two echo commands were fused into one line, leaving two
# competing redirections on a single echo; split onto separate lines.
echo "* hard nofile 65535" >> /etc/security/limits.conf
echo "* soft nofile 65535" >> /etc/security/limits.conf
# Append kernel network tuning to sysctl.conf.
# Fixes vs. the original: the EOF terminator was fused onto the last content
# line, so the here-doc never terminated and swallowed the rest of the
# script; duplicate keys (tcp_slow_start_after_idle, rmem_max, wmem_max)
# are collapsed to the value that previously won (the last occurrence).
cat << EOF >> /etc/sysctl.conf
net.core.somaxconn = 65535
net.core.rmem_default = 262144
net.core.wmem_default = 262144
net.core.rmem_max = 67108864
net.core.wmem_max = 67108864
net.core.optmem_max = 65536
net.core.netdev_budget = 300
net.core.netdev_max_backlog = 5000
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_max_tw_buckets = 65535
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 10
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_limit_output_bytes = 131072
net.ipv4.tcp_fastopen = 3
net.ipv4.tcp_mtu_probing = 1
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.all.accept_ra = 2
net.ipv6.conf.default.accept_ra = 2
net.ipv6.conf.all.accept_ra_pinfo = 1
net.ipv6.conf.default.accept_ra_pinfo = 1
net.ipv6.conf.all.accept_ra_defrtr = 1
net.ipv6.conf.default.accept_ra_defrtr = 1
net.ipv6.conf.all.autoconf = 1
net.ipv6.conf.default.autoconf = 1
net.ipv6.conf.all.max_addresses = 16
net.ipv6.conf.default.max_addresses = 16
net.ipv6.conf.all.accept_redirects = 2
net.ipv6.conf.default.accept_redirects = 2
net.ipv6.conf.all.router_solicitations = 0
net.ipv6.conf.default.router_solicitations = 0
net.ipv6.conf.all.dad_transmits = 0
net.ipv6.conf.default.dad_transmits = 0
EOF
# Apply the new settings immediately.
sysctl -p
# Attach fq with a soft rate cap on egress, and mirror ingress traffic to
# ifb0 for shaping. Fixed: the ifb module was never loaded before ifb0 was
# referenced, so every ifb0 command failed; load it first.
sudo modprobe ifb numifbs=1 2>/dev/null || true
for interface in $interfaces; do
    echo "Tuning network queue disciplines (Qdiscs) and TCP retransmission for interface $interface..."
    sudo tc qdisc add dev "$interface" root fq
    sudo tc qdisc change dev "$interface" root fq maxrate 90mbit
    sudo tc qdisc change dev "$interface" root fq burst 15k
    sudo tc qdisc add dev "$interface" ingress
    sudo tc filter add dev "$interface" parent ffff: protocol ip u32 match u32 0 0 \
        action connmark action mirred egress redirect dev ifb0
    sudo tc qdisc add dev ifb0 root sfq perturb 10
    sudo ip link set dev ifb0 up
    # NOTE(review): 'tx off rx off' disables checksum offload entirely;
    # that normally costs CPU with no benefit — confirm this is intended.
    sudo ethtool -K "$interface" tx off rx off
done
# Mark TCP traffic 10 and UDP traffic 20 in the mangle table so the Qdiscs
# configured above can prioritize them.
for interface in $interfaces; do
    echo "Setting priority for TCP and UDP traffic on interface $interface..."
    sudo iptables -t mangle -A OUTPUT -p tcp -o "$interface" -j MARK --set-mark 10
    sudo iptables -t mangle -A OUTPUT -p udp -o "$interface" -j MARK --set-mark 20
    # Fixed: the original PREROUTING TCP rule had no '-p tcp', so it marked
    # ALL inbound packets 10 before the UDP rule re-marked UDP as 20.
    sudo iptables -t mangle -A PREROUTING -p tcp -i "$interface" -j MARK --set-mark 10
    sudo iptables -t mangle -A PREROUTING -p udp -i "$interface" -j MARK --set-mark 20
done
# Enable the BBR congestion-control module (paired with the fq qdisc) if it
# is not already loaded, and persist the settings across reboots.
# The original had all commands fused onto single lines (syntax errors).
if lsmod | grep -q "^tcp_bbr "; then
    echo "BBR 模块已安装"
else
    echo "安装 BBR 模块..."
    sudo modprobe tcp_bbr
    echo "tcp_bbr" | sudo tee -a /etc/modules-load.d/modules.conf
    echo "net.core.default_qdisc=fq" | sudo tee -a /etc/sysctl.conf
    echo "net.ipv4.tcp_congestion_control=bbr" | sudo tee -a /etc/sysctl.conf
    sudo sysctl -p
fi

# Verify BBR is now the active congestion-control algorithm.
if sysctl net.ipv4.tcp_congestion_control | grep -q "bbr"; then
    echo "BBR 已启用"
else
    echo "BBR 启用失败,请手动检查您的系统设置"
fi

echo "系统优化设置完成。"
大佬建议:
增加bbr开启检测 增加 IPv6 优化参数 增加TCP窗口大小优化 增加TCP 性能峰值优化 增加系统 initcwnd 参数优化 增加Ring Buffer 大小和队列数量优化 增加txqueuelen 参数优化 增加系统虚拟化为KVM则关闭 TSO 和 GSO ethtool和nmcli检测安装 nmcli增加环缓冲的大小 调优网络设备积压队列以避免数据包丢弃 增加NIC的传输队列长度 优化TCP重传次数 调整网络队列处理算法(Qdiscs) 开启TCP Fast Open (TFO) 调整TCP和UDP流量的优先级 已经做好注释.保存文件并退出 然后运行脚本sudo bash optimization.sh 如果运行出现
debian_optimization.sh: line 2: $'\r': command not found
debian_optimization.sh: line 30: syntax error near unexpected token `do'
debian_optimization.sh: line 30: `for interface in $interfaces; do'
请安装 dos2unix:sudo apt install dos2unix,然后将脚本转换为 Unix 格式:dos2unix optimization.sh,再运行脚本:sudo bash optimization.sh
根据[jerry048]大佬的#27-31楼的建议,挑选部分添加,但是有的优化需要搭配实际VPS配置网络环境等情况,怕添加后出现负优化的情况.所以标注出来大家自行前去学习膜拜!jerry048大佬的优化方案,我也做了一个脚本.请大家根据自己VPS的实际情况修改参数 #!/bin/bash
# Detect the first active NIC (interfaces whose link state is UP).
# NOTE(review): the original line was truncated by the forum paste after
# "awk '{print"; this reconstructs the intended pipeline — field 2 of the
# 'ip addr' header line is "ethX:", strip the colon, take the first match.
nic_interface=$(ip addr | grep 'state UP' | awk '{print $2}' | sed 's/://' | head -n 1)
# Install ethtool if it is not on PATH (script 2 runs as root, no sudo).
# The original had the if-body fused onto one line (syntax error).
if ! [ -x "$(command -v ethtool)" ]; then
    apt-get update
    apt-get -y install ethtool
fi
# Inspect drop counters to judge whether NIC buffers are overflowing.
echo "Checking NIC's missed packet count..."
ethtool -S "$nic_interface" | grep -e rx_no_buffer_count -e rx_missed_errors -e rx_fifo_errors -e rx_over_errors

# Show current ring sizes, then enlarge the receive ring.
echo "Increasing the size of NIC's receive buffer..."
ethtool -g "$nic_interface"
ethtool -G "$nic_interface" rx 2048

# Spread interrupt load across multiple combined RX/TX channels.
echo "Increasing the number of query channels..."
ethtool -l "$nic_interface"
ethtool -L "$nic_interface" combined 4

# Batch interrupts slightly (10us) to cut per-packet IRQ overhead.
echo "Adjusting interrupt coalescing settings..."
ethtool -c "$nic_interface"
ethtool -C "$nic_interface" rx-usecs 10 tx-usecs 10

# Column 2+ of softnet_stat shows drops/squeezes per CPU.
echo "Checking softIRQ misses..."
cat /proc/net/softnet_stat
# Runtime (non-persistent) kernel tuning via sysctl -w.
echo "Increasing the size of NIC's backlog..."
sysctl -w net.core.netdev_max_backlog=10000

echo "Increasing netdev_budget and netdev_budget_usecs..."
sysctl -w net.core.netdev_budget=50000
sysctl -w net.core.netdev_budget_usecs=8000

echo "Enabling receive buffer auto-tuning..."
sysctl -w net.ipv4.tcp_moderate_rcvbuf=1

echo "Enabling TCP window scaling..."
sysctl -w net.ipv4.tcp_window_scaling=1

echo "Setting maximum TCP window size..."
sysctl -w net.ipv4.tcp_workaround_signed_windows=1

echo "Increasing the maximum number of file descriptors..."
sysctl -w fs.file-max=1000000
sysctl -w fs.nr_open=1000000

echo "Increasing the maximum port range..."
sysctl -w net.ipv4.ip_local_port_range="1024 65535"

echo "Increasing the maximum queue length of completely established sockets..."
sysctl -w net.core.somaxconn=10000

echo "Increasing the maximum number of orphaned connections..."
sysctl -w net.ipv4.tcp_max_orphans=10000

echo "Increasing the maximum number of SYN_RECV sockets..."
# Fixed: the original wrote 'net.ipv4.tcp_max_syn_recv', which is not a
# valid sysctl key; the half-open connection queue is tcp_max_syn_backlog.
sysctl -w net.ipv4.tcp_max_syn_backlog=10000

echo "Increasing the maximum number of sockets in TIME_WAIT state..."
sysctl -w net.ipv4.tcp_max_tw_buckets=10000

echo "Quickly discarding sockets in FIN-WAIT-2 state..."
sysctl -w net.ipv4.tcp_fin_timeout=10
echo "Setting TCP socket buffer sizes..."
sysctl -w net.ipv4.tcp_adv_win_scale=-2
sysctl -w net.core.rmem_max=134217728
sysctl -w net.ipv4.tcp_rmem="8192 262144 134217728"
sysctl -w net.core.wmem_max=33554432
# NOTE(review): the original line was truncated mid-value by the forum
# paste ('...tcp_wmem="8192'); "8192 262144 33554432" reconstructs a
# plausible min/default/max triple whose max matches wmem_max above —
# confirm against jerry048's original post (#27-31).
sysctl -w net.ipv4.tcp_wmem="8192 262144 33554432"
此优化针对的是多并发,多,多.注意
第二个方式 网络收集的一键优化脚本(能明显优化,但部分系统会出现负优化的问题)
wget https://gist.githubusercontent.com/taurusxin/a9fc3ad039c44ab66fca0320045719b0/raw/3906efed227ee14fc5b4ac8eb4eea8855021ef19/optimize.sh && sudo bash optimize.sh
欢迎大佬们莅临指点补充 小弟也能多学点 yct019