@liruiyi962464
2025-05-27T09:45:54.000000Z
Linux Server Deployment
su
Admin@8888
Admin@888
ss -tuln
lsof -i:9060
kill -9
netstat -tpln
netstat -tulnp | grep 8090
netstat -tulnp | grep 8091
echo $PATH
chmod -R 777 <filename>
chmod +x ./config
cd /data/py/shuohuang
nohup python3 api.py > /data/py/logs/sp.log 2>&1 &
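To confirm the Python service actually came up after the nohup launch above, a quick check (a sketch; the paths are the ones used above):
ps -ef | grep "[a]pi.py"          # the bracketed pattern excludes the grep process itself
tail -n 50 /data/py/logs/sp.log    # inspect the most recent log output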
systemctl start nginx   # or any other service name (combined example below)
stop      stop the service
status    show service status
enable    enable at boot
start     start the service
restart   restart the service
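For example, assuming the target service is managed by systemd, enable-at-boot and start can be combined in one step:
systemctl enable --now nginx     # enable at boot and start immediately
systemctl status nginx           # confirm it is active (running)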
cd /etc/systemd/system/
[Unit]
Description=Java Applications Service
After=network.target redis.service mysqld.service
[Service]
User=adminis
Environment="JAVA_HOME=/usr/local/java/jdk1.8.0_241"
Environment="PATH=/usr/local/java/jdk1.8.0_241/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/node/node-v14.17.2-linux-x64/bin/"
Environment="CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar"
Type=simple
RemainAfterExit=yes
TimeoutSec=300
# Changed to settings that support automatic restart
Restart=on-failure
RestartSec=5s
# Use a single ExecStart directive to perform all required steps
ExecStart=/bin/sh -c ' \
echo "Switching to /data/sp/daima"; \
cd /data/sp/daima; \
echo "Starting Java application in SP daima..."; \
nohup java -jar jeecg-boot-module-system-3.0.jar > /dev/null 2>&1 & \
'
[Install]
WantedBy=multi-user.target
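Because the unit above backgrounds the JVM with nohup inside sh -c, systemd only tracks the short-lived shell, so Restart=on-failure never restarts the Java process itself. A simpler variant (a sketch only, not taken from these notes; same paths and user as above, and the same pattern would apply to the LTE unit that follows) lets systemd supervise the JVM directly:
[Unit]
Description=SP Java Application
After=network.target redis.service mysqld.service

[Service]
User=adminis
WorkingDirectory=/data/sp/daima
Environment="JAVA_HOME=/usr/local/java/jdk1.8.0_241"
ExecStart=/usr/local/java/jdk1.8.0_241/bin/java -jar jeecg-boot-module-system-3.0.jar
Restart=on-failure
RestartSec=5s

[Install]
WantedBy=multi-user.target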
[Unit]
Description=Java Applications Service
After=network.target redis.service mysqld.service
[Service]
User=root
Environment="JAVA_HOME=/usr/local/java/jdk1.8.0_411"
Environment="PATH=/data/7z/bin:/usr/local/java/jdk1.8.0_411/bin:/usr/local/node/node-v14.21.1-linux-x64/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin:/usr/local/node/node-v14.21.3-linux-x64/bin"
Environment="CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar"
Type=simple
RemainAfterExit=yes
TimeoutSec=300
# Changed to settings that support automatic restart
Restart=on-failure
RestartSec=5s
# Use a single ExecStart directive to perform all required steps
ExecStart=/bin/sh -c ' \
echo "Switching to /data/lte/daima"; \
cd /data/lte/daima; \
echo "Starting Java application in LTE daima..."; \
nohup java -jar jeecg-boot-module-system-3.0.jar > /dev/null 2>&1 & \
'
[Install]
WantedBy=multi-user.target
[Unit]
Description=HTTP Server Service
After=network.target
[Service]
User=adminis
WorkingDirectory=/data/chinaMap/mapabc/roadmap
ExecStart=/usr/local/node/node-v14.17.2-linux-x64/bin/http-server .
Restart=on-failure
RestartSec=5s
Environment="NODE_ENV=production"
[Install]
WantedBy=multi-user.target
[Unit]
Description=HTTP Server Service
After=network.target
[Service]
User=root
WorkingDirectory=/data/chinaMap/mapabc/roadmap
ExecStart=/usr/local/node/node-v14.21.3-linux-x64/bin/http-server .
Restart=on-failure
RestartSec=5s
Environment="NODE_ENV=production"
[Install]
WantedBy=multi-user.target
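To verify the http-server units are serving the map files, a quick check (http-server listens on port 8080 by default since no -p option is passed above; adjust if the port differs):
curl -I http://localhost:8080/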
[Unit]
Description=WebRTC Streamer Service
After=network.target docker.service
[Service]
User=adminis
Type=simple
Restart=on-failure
RestartSec=5s
# Use a single ExecStart directive to perform all required steps
ExecStart=/bin/sh -c ' \
/usr/bin/docker stop webrtc-streamer || true; \
docker ps -a | grep webrtc-streamer && docker rm webrtc-streamer || true; \
if ! systemctl is-active docker; then sudo systemctl start docker; fi; \
docker run -d --network=host --name webrtc-streamer --memory 512m --memory-swap 512m mpromonet/webrtc-streamer \
'
[Install]
WantedBy=multi-user.target
[Unit]
Description=WebRTC Streamer Service
After=network.target docker.service
[Service]
User=adminis
Type=simple
Restart=on-failure
RestartSec=5s
# Use a single ExecStart directive to perform all required steps
ExecStart=/bin/sh -c ' \
/usr/bin/docker stop webrtc-streamer || true; \
docker ps -a | grep webrtc-streamer && docker rm webrtc-streamer || true; \
if ! systemctl is-active docker; then sudo systemctl start docker; fi; \
docker run -d --network=host --name webrtc-streamer --memory 4G --memory-swap 8G mpromonet/webrtc-streamer \
'
[Install]
WantedBy=multi-user.target
// Reload the systemd configuration
sudo systemctl daemon-reload
// Enable and start the services
sudo systemctl enable http-server.service
sudo systemctl enable webrtc-streamer.service
sudo systemctl enable java-applications.service
sudo systemctl start http-server.service
sudo systemctl start webrtc-streamer.service
sudo systemctl start java-applications.service
// Check service status
sudo systemctl status http-server.service
sudo systemctl status webrtc-streamer.service
sudo systemctl status java-applications.service
// View service logs
journalctl -u http-server.service
journalctl -u webrtc-streamer.service
journalctl -u java-applications.service
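For live troubleshooting, journalctl can follow a unit's log and limit the output, for example:
journalctl -u java-applications.service -f -n 100      # follow, starting from the last 100 lines
journalctl -u webrtc-streamer.service --since "10 min ago"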
java -version
cd /usr/local/java/jdk1.8.0_241/bin
vi /etc/profile
source /etc/profile
// Not sure whether this is needed
Run vim ~/.bashrc and add the following line to the opened file:
source /etc/profile
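The notes do not show the exact lines added to /etc/profile; a typical sketch, using the JDK path from the unit files above, would be:
JAVA_HOME=/usr/local/java/jdk1.8.0_241
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME CLASSPATH PATH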
// Important: if closing the remote session makes the map unreachable, reconnect, start the map again, and then use the exit command to leave the remote session
cd /data/chinaMap/mapabc/roadmap
nohup http-server > /data/chinaMapLogs/map.log 2>&1 &
docker run -d --network=host --name webrtc-streamer --memory 4G --memory-swap 8G mpromonet/webrtc-streamer --idle-timeout 60
docker run -it --network=host --name webrtc-streamer --memory 4G --memory-swap 8G mpromonet/webrtc-streamer --idle-timeout 60
# Watch the container's memory/CPU usage in real time
docker stats webrtc-streamer
# Check the number of RTSP stream connections
docker logs webrtc-streamer | grep "New peer connection"
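The configured memory limit and current state can also be read with docker inspect, for example:
docker inspect -f '{{.State.Status}} {{.HostConfig.Memory}}' webrtc-streamer   # status and memory limit in bytes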
sudo docker rm webrtc-streamer
sudo systemctl restart docker
// Problematic: sudo docker run -d -p 8000:8000 --name webrtc-streamer mpromonet/webrtc-streamer
sudo docker run -d --network=host --name webrtc-streamer mpromonet/webrtc-streamer
// View logs
sudo docker logs webrtc-streamer
// Stop manually
sudo docker stop webrtc-streamer
sudo docker rm webrtc-streamer
// Test commands
docker run -it -p 13100:8000 --name webrtc-streamer mpromonet/webrtc-streamer
sudo docker run -d --network=host --name webrtc-streamer --memory 512m --memory-swap 512m mpromonet/webrtc-streamer
sudo docker stop webrtc-streamer
sudo docker rm webrtc-streamer
docker run -d --network=host --name webrtc-streamer --memory 4G --memory-swap 8G mpromonet/webrtc-streamer
Configuration file: vi /etc/rc.local
// Grant execute permission
chmod +x /etc/rc.local
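The notes do not list the contents of /etc/rc.local here (the nginx start line appears in the nginx section below); a sketch, assuming the same start commands used elsewhere in these notes, might look like:
#!/bin/bash
/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
su - adminis -c 'cd /data/chinaMap/mapabc/roadmap && nohup http-server > /data/chinaMapLogs/map.log 2>&1 &'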
lsof -i:9060
kill -9
cd /data/sp/daima
nohup java -jar jeecg-boot-module-system-3.0.jar > /data/sp/logs/spring.log 2>&1 &
nohup java -jar jeecg-boot-module-system-3.0.jar > /dev/null 2>&1 &
// Check whether it started
tail -n 50 /data/sp/logs/spring.log
cd /data/lte/daima
nohup java -jar jeecg-boot-module-system-3.0.jar > /data/lte/logs/spring.log 2>&1 &
nohup java -jar jeecg-boot-module-system-3.0.jar > /dev/null 2>&1 &
// Check whether it started
tail -n 50 /data/lte/logs/spring.log
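A Spring Boot application prints a "Started ..." line once it has finished starting; assuming the LTE backend listens on port 8091 (as the nginx proxy configuration below suggests), startup can also be confirmed with:
grep "Started" /data/lte/logs/spring.log | tail -n 1
ss -tuln | grep 8091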
// Download Node.js from the official site
https://nodejs.org/en/download/
// After downloading, upload the archive to the server
// Create the directory
mkdir /usr/local/node
// Enter it
cd /usr/local/node
// Extract, option 1 (.tar.gz)
tar -zxvf node-v14.17.0-linux-x64.tar.gz
// Extract, option 2 (the downloaded archive may end in .xz)
tar -Jxvf node-v14.21.3-linux-x64.tar.xz
// Create symlinks with ln -s
ln -s /usr/local/node/node-v14.17.0-linux-x64/bin/npm /usr/local/bin/npm
ln -s /usr/local/node/node-v14.17.0-linux-x64/bin/node /usr/local/bin/node
// Configure environment variables
vim /etc/profile
// Append the following at the bottom of the file
NODE_HOME=/usr/local/node/node-v14.17.0-linux-x64
PATH=$NODE_HOME/bin:$PATH
export NODE_HOME PATH
// Apply the new configuration
source /etc/profile
// Test
node -v
npm -v
// Once Node is confirmed working, run the commands below
// Install
npm install -g http-server
// Uninstall
npm uninstall -g http-server
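Once installed, http-server can be pointed at the map directory directly; a usage sketch (the -p and -a options choose the port and bind address; 8080 is only an example):
http-server /data/chinaMap/mapabc/roadmap -p 8080 -a 0.0.0.0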
Download the source package from the nginx website: https://nginx.org/download/
yum -y install gcc pcre-devel zlib-devel openssl openssl-devel
If you are not sure whether the dependencies are installed, run the command below; any output means the package is already installed. The text inside the double quotes can be replaced as needed.
yum list installed | grep "pcre-devel"
tar -zxvf nginx-1.20.1.tar.gz
Option reference
-x: extract
-z: handle gzip-compressed archives
-v: show progress for every file processed
-f: use the given archive file name; note that this must be the last option and can only be followed by the file name.
Only the options used by this command are covered here; look up the rest as needed.
cd nginx-1.20.1
Installing from source generally involves three steps: configure, compile (make), and install (make install).
(1) Run the configure step first
./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module
(2) Compile
make
(3) Install
make install
At this point an nginx directory has been created under /usr/local.
(4) Check the nginx version:
# Enter the executable directory sbin
cd /usr/local/nginx/sbin/
# Show the nginx version
./nginx -v
# Show the version, compiler version, and configure arguments (capital V)
./nginx -V
1. Check that the configuration file syntax is correct
/usr/local/nginx/sbin/nginx -t
2. Start
Enter the executable directory sbin
cd /usr/local/nginx/sbin/
Start nginx
# Equivalent to /usr/local/nginx/sbin/nginx; you can also run /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
./nginx
# Fast shutdown; together with ./nginx this can be used to restart
./nginx -s stop
# Graceful shutdown (waits for requests to finish); also usable with ./nginx to restart
./nginx -s quit
# Reload the configuration file without a full restart
./nginx -s reload
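A safe reload pattern is to validate the configuration first and reload only if the check passes:
./nginx -t && ./nginx -s reload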
3. Access the nginx page
http://<server IP>:<port>. The default nginx port is 80; it can be viewed and changed in the configuration file (conf/nginx.conf under the install directory).
If the default welcome page is shown, the deployment succeeded (the page file is /usr/local/nginx/html/index.html).
(1) If the page cannot be reached, check whether the nginx port is listening; no output means it is not.
netstat -nltp | grep 80
(2) The firewall needs to be stopped; if you would rather not stop it, go straight to step (3).
# Check firewall status
systemctl status firewalld
# Stop the firewall
systemctl stop firewalld
# Start the firewall
systemctl start firewalld
(3) If you do not want to stop the firewall, open the nginx port in the firewall instead.
# Open nginx port 80 to the outside (permanent)
firewall-cmd --permanent --zone=public --add-port=80/tcp
# Reload the firewall configuration
firewall-cmd --reload
# List all ports opened in the firewall and check that port 80 is present
firewall-cmd --zone=public --list-ports
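The same steps apply to the other ports used in these notes, e.g. the 3031 front end and the 8091 backend (adjust to whatever is actually exposed on this host):
firewall-cmd --permanent --zone=public --add-port=3031/tcp
firewall-cmd --permanent --zone=public --add-port=8091/tcp
firewall-cmd --reload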
1. To make nginx start at boot, edit /etc/rc.d/rc.local.
# Grant execute permission
chmod +x /etc/rc.d/rc.local
# Open the file (/etc/rc.local is a symlink to /etc/rc.d/rc.local)
vi /etc/rc.d/rc.local
# Press i, move the cursor to the last line, and add the command below (the nginx start command)
/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
2. nginx configuration file
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
# ...
# Connect timeout
fastcgi_connect_timeout 600s;
# Send/read timeouts (all extended to 600s)
fastcgi_send_timeout 600s;
fastcgi_read_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
client_body_timeout 600s;
client_header_timeout 600s;
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
# Map variable so both plain HTTP and WebSocket requests are handled
map $http_upgrade $connection_upgrade {
default keep-alive; # default keep-alive, i.e. a plain HTTP request
'websocket' upgrade; # for WebSocket requests, upgrade the protocol
}
#gzip on;
server{
listen 3031;
server_name localhost;
# Add request headers
proxy_set_header Cookie $http_cookie;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-real-ip $remote_addr;
#charset koi8-r;
#access_log logs/host.access.log main;
client_max_body_size 500m;
# Location rules and their proxy targets
location /api/ {
proxy_pass http://localhost:8091/; # note: remember the trailing slash "/" on the proxy address
}
location /jeecg-boot-file {
#proxy_set_header Host $Host;
#proxy_set_header X-Forward-For $remote_addr;
proxy_pass http://localhost:8091;
}
location /jeecg-boot {
proxy_set_header Host $host;
proxy_set_header X-Forward-For $remote_addr;
proxy_pass http://localhost:8091;
}
location / {
root ./html/dist_lte;
index index.html index.htm;
try_files $uri $uri/ /index.html =404;
}
# WebSocket proxy rules
location /jeecg-boot/websocket {
proxy_pass http://localhost:8091; # forward to the backend
proxy_read_timeout 60s; # timeout; the default is 60s
proxy_http_version 1.1;
proxy_set_header Host $host; # do not omit this; it is required
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
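After editing the configuration, a quick way to apply and smoke-test it (a sketch; 3031 is the listen port from the server block above):
/usr/local/nginx/sbin/nginx -t && /usr/local/nginx/sbin/nginx -s reload
curl -I http://localhost:3031/          # front-end page served from html/dist_lte
curl -I http://localhost:3031/api/      # should be proxied to the backend on 8091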
NAT port-mapping commands
// disp current-configuration
nat address-group 0 10.51.100.17 10.51.100.17
nat server 0 protocol tcp global 10.51.100.17 8090 inside 172.0.0.10 8090
nat server 1 protocol tcp global 10.51.100.17 11554 inside 172.0.0.20 rtsp
nat server 2 protocol tcp global 10.51.100.17 11443 inside 172.0.0.20 443
nat server 3 protocol tcp global 10.51.100.17 12554 inside 172.0.0.21 rtsp
nat server 4 protocol tcp global 10.51.100.17 12443 inside 172.0.0.21 443
nat server 5 protocol tcp global 10.51.100.17 13554 inside 172.0.0.22 rtsp
nat server 6 protocol tcp global 10.51.100.17 13443 inside 172.0.0.22 443
nat server 7 protocol tcp global 10.51.100.17 14554 inside 172.0.0.23 rtsp
nat server 8 protocol tcp global 10.51.100.17 14443 inside 172.0.0.23 443
nat server 9 protocol tcp global 10.51.100.17 15554 inside 172.0.0.24 rtsp
nat server 10 protocol tcp global 10.51.100.17 15443 inside 172.0.0.24 443
nat server 11 protocol tcp global 10.51.100.17 16554 inside 172.0.0.25 rtsp
nat server 12 protocol tcp global 10.51.100.17 16443 inside 172.0.0.25 443
nat server 13 protocol tcp global 10.51.100.17 17554 inside 172.0.0.26 rtsp
nat server 14 protocol tcp global 10.51.100.17 17443 inside 172.0.0.26 443
nat server 15 protocol tcp global 10.51.100.17 18554 inside 172.0.0.27 rtsp
nat server 16 protocol tcp global 10.51.100.17 18443 inside 172.0.0.27 443
nat server 17 protocol tcp global 10.51.100.17 19554 inside 172.0.0.28 rtsp
nat server 18 protocol tcp global 10.51.100.17 19443 inside 172.0.0.28 443
nat server 19 protocol tcp global 10.51.100.17 10554 inside 172.0.0.29 rtsp
nat server 20 protocol tcp global 10.51.100.17 10443 inside 172.0.0.29 443
nat server 21 protocol tcp global 10.51.100.17 3030 inside 172.0.0.10 3030
nat server 22 protocol tcp global 10.51.100.17 3031 inside 172.0.0.11 3031
// Add the entries in order; when everything is added, run the save command (a quick check is sketched below)
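Assuming a Huawei VRP-style CLI (as the disp/nat syntax above suggests), the entries can be double-checked before saving:
disp current-configuration | include nat server
save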