普通用户更新软件源索引(刷新可安装软件列表)
sudo apt update
安装 Python3 + venv + pip
顺便安装 openssl:add_site.sh 里用 openssl rand 生成数据库密码
sudo apt install -y python3 python3-venv python3-pip openssl
查看 Python3 版本,确认安装成功
python3 --version
清空(或创建)~/scripts/setup.py,然后打开 nano 编辑
把你的 Python 脚本粘贴进去,保存退出(nano: Ctrl+O 回车保存,Ctrl+X 退出)
mkdir -p ~/scripts
truncate -s 0 ~/scripts/setup.py
nano ~/scripts/setup.py
给脚本加可执行权限(可选,但推荐)
chmod +x ~/scripts/setup.py
执行部署脚本
python3 ~/scripts/setup.py
选项19:外部导入 (imports):DB / WP增量 / 整站 + 新域名提示:
其中选项1和选项2:需要先新建站点并完成安装后,才可以导入
其中选项3,新建站点,不需要安装,就可以全量导入
--- 导入外部备份 (imports) ---
请选择导入类型:
1) 只导入数据库(导入到现有站点)
2) WordPress 增量导入(DB + uploads/plugins/themes 等)
3) 导入整站(解压站点目录+启动,可选导 db.sql)
选择(1/2/3,回车取消):
通过 scp 上传文件:需要先赋予目录权限,再用 scp 传输
sudo chown -R kaixinit:kaixinit /data/docker_sites/imports
sudo chmod 755 /data/docker_sites/imports
scp "D:\kaixinit.com_20260126_1320.tar.gz" kaixinit@192.168.1.95:/data/docker_sites/imports/
(以下为 Python 脚本)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Docker 全栈管理系统 (Python 终极版 V6.0)
- Debian 12/13 友好,普通用户运行(sudo)
- 核心服务:Nginx Proxy Manager + FRP Client
- 站点:WordPress / 通用 PHP
- 镜像源策略(站点创建/核心拉取):
先用 prefix=DaoCloud -> 失败切 DockerProxy -> 再失败则 no-prefix(靠 daemon.json 的 registry-mirrors)
其它(Baidu/163/USTC/SJTU/Tencent)不做前缀,只放进 daemon.json mirrors
- 选项2 新建站点末尾加入“轻量清缓存”(避免首次访问命中 NPM 默认页缓存)
- 选项3:站点列表(脱敏密码、无路径、单行对齐),编号查看“站点详情”(多行)
- imports 外部导入三模式:
1) 只导入数据库(sql/sql.gz 或 tar.gz 内 db.sql)
2) 导入数据库 + 指定 wp-content 子目录(uploads/plugins/themes/...)
3) 导入整站(tar.gz 解压站点目录 + 启动,可选导 db.sql)
三模式若导入 DB:提示“是否更新新域名(home/siteurl)”(安全更新)
"""
import glob
import json
import os
import random
import re
import secrets
import shutil
import string
import subprocess
import sys
import time
# ================= Configuration =================
BASE_DIR = "/data/docker_sites"
BACKUP_DIR = os.path.join(BASE_DIR, "backups")
CACHE_DIR = os.path.join(BASE_DIR, "cache")
SCRIPTS_DIR = os.path.join(BASE_DIR, "scripts")
PROXY_DIR = os.path.join(BASE_DIR, "proxy")
FRPC_DIR = os.path.join(BASE_DIR, "frpc")
IMPORT_DIR = os.path.join(BASE_DIR, "imports")
# Prefix mirrors (only these may be used as an IMAGE_MIRROR prefix)
PREFIX_MIRRORS = [
    ("2", "docker.m.daocloud.io", "DaoCloud (prefix)"),
    ("1", "dockerproxy.com", "DockerProxy (prefix)"),
    ("9", "docker.1ms.run", "1ms (prefix)"),
    ("5", "", "no-prefix (use daemon registry-mirrors)"),
]
# registry-mirrors written into daemon.json (all other sources go here, never as prefixes)
DAEMON_REGISTRY_MIRRORS = [
    "https://docker.mirrors.ustc.edu.cn",
    "https://mirror.baidubce.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.sjtu.edu.cn",
    "https://mirror.ccs.tencentyun.com",
    "https://docker.m.daocloud.io",
    "https://dockerproxy.com",
    "https://docker.1ms.run",
]
# DNS servers injected into site containers
DEFAULT_DNS = ["1.1.1.1", "8.8.8.8"]
# Mirror strategy key for the install phase (default: DaoCloud prefix)
INIT_MIRROR_KEY = "2"
# Mirror strategy key for new sites (default: no-prefix, avoids prefixes that fail to resolve)
SITE_MIRROR_KEY = "5"
# ANSI terminal color codes
class Colors:
    HEADER = "\033[95m"
    BLUE = "\033[94m"
    CYAN = "\033[96m"
    GREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # reset to default
    BOLD = "\033[1m"
# ================= Utility helpers =================
def die(message, code=1):
    """Print *message* and terminate the process with exit status *code*."""
    print(message)
    raise SystemExit(code)
def q(s: str) -> str:
    """Wrap *s* in single quotes for safe interpolation into a shell command.

    Embedded single quotes become the classic '"'"' escape sequence.
    """
    escaped = str(s).replace("'", "'\"'\"'")
    return f"'{escaped}'"
def require_non_root():
    """Abort unless running as a regular user with a working sudo.

    Exits when euid is 0 (must not run as root) or sudo is missing;
    raises CalledProcessError when `sudo -v` fails.
    """
    if os.geteuid() == 0:
        die(f"{Colors.FAIL}❌ 请使用普通用户运行此脚本 (需要 sudo){Colors.ENDC}")
    if shutil.which("sudo") is None:
        die(f"{Colors.FAIL}❌ 需要 sudo,请先安装 sudo 后再运行。{Colors.ENDC}")
    # Prime the sudo credential cache up front so later calls don't prompt mid-flow.
    subprocess.run(["sudo", "-v"], check=True)
def run_cmd(cmd, ignore_errors=False):
    """Run *cmd* via `sudo bash -c`; return True on success, False on failure.

    When ignore_errors is True, stderr is suppressed and failures are silent.
    """
    try:
        stderr = subprocess.DEVNULL if ignore_errors else None
        subprocess.run(["sudo", "bash", "-c", cmd], check=True, stderr=stderr)
        return True
    except subprocess.CalledProcessError as e:
        if not ignore_errors:
            print(f"{Colors.FAIL}❌ 命令执行失败: {cmd}{Colors.ENDC}")
            print(f"{Colors.FAIL}错误信息: {e}{Colors.ENDC}")
        return False
def get_output(cmd):
    """Run *cmd* via `sudo bash -c` and return stripped stdout; '' on any error."""
    try:
        raw = subprocess.check_output(["sudo", "bash", "-c", cmd])
    except Exception:
        return ""
    return raw.decode("utf-8", errors="ignore").strip()
def ensure_dir(path):
    # Best-effort recursive mkdir via sudo (errors ignored).
    run_cmd(f"mkdir -p {q(path)}", ignore_errors=True)
def write_file(path, content):
    """Write *content* to *path* as root via `sudo tee` (raises on failure)."""
    tee_cmd = ["sudo", "tee", path]
    subprocess.run(tee_cmd, input=content, text=True, check=True,
                   stdout=subprocess.DEVNULL)
def read_file(path):
    """Return the UTF-8 text of *path*; fall back to `sudo cat` when unreadable."""
    try:
        with open(path, "r", encoding="utf-8") as handle:
            return handle.read()
    except Exception:
        # Root-owned files are not readable directly; go through sudo.
        return get_output(f"cat {q(path)}")
def remove_tree(path):
    # Recursive delete via sudo; errors ignored (path may not exist).
    run_cmd(f"rm -rf {q(path)}", ignore_errors=True)
def sanitize_docker_repo():
    """Repair a legacy docker.list that still contains an unexpanded `$(...)`.

    Rewrites /etc/apt/sources.list.d/docker.list with a literal Aliyun
    docker-ce entry. Debian 13 "trixie" is mapped to "bookworm" —
    presumably because the mirror does not publish a trixie suite yet.
    """
    repo_file = "/etc/apt/sources.list.d/docker.list"
    content = read_file(repo_file)
    # Nothing to do when the file is absent or already literal.
    if not content or "$(" not in content:
        return
    codename = get_output("lsb_release -cs")
    if codename == "trixie":
        codename = "bookworm"
    arch = get_output("dpkg --print-architecture") or "amd64"
    fixed = (
        f"deb [arch={arch} signed-by=/etc/apt/keyrings/docker.gpg] "
        f"https://mirrors.aliyun.com/docker-ce/linux/debian {codename} stable\n"
    )
    write_file(repo_file, fixed)
def generate_password(length=16):
    """Return a random alphanumeric password of *length* characters.

    Uses the `secrets` module (CSPRNG) instead of `random`, which is not
    suitable for security-sensitive values such as database passwords.
    """
    chars = string.ascii_letters + string.digits
    return "".join(secrets.choice(chars) for _ in range(length))
def get_primary_ip():
    # First address reported by `hostname -I`; loopback when detection fails.
    ip = get_output("hostname -I | awk '{print $1}'")
    return ip or "127.0.0.1"
def has_marker(path):
    # True when *path* exists as a regular file (checked via sudo, so root-owned files count).
    return get_output(f"test -f {q(path)} && echo ok") == "ok"
def set_marker(path, content):
    # Create/overwrite a marker file with the given content.
    write_file(path, content)
def clear_marker(path):
    # Remove a marker file via sudo; missing file is not an error.
    run_cmd(f"rm -f {q(path)}", ignore_errors=True)
def pause(prompt="按回车返回..."):
    """Wait for Enter, draining any buffered stdin first so stray input
    doesn't skip the pause."""
    try:
        import select
        # Zero-timeout poll: consume whatever is already pending on stdin.
        while select.select([sys.stdin], [], [], 0)[0]:
            sys.stdin.read(1)
    except Exception:
        # select may be unavailable on this stream; just prompt.
        pass
    input(prompt)
def ensure_docker_network(name):
    """Create the docker network *name* unless `docker network inspect` finds it."""
    exists = get_output(f"docker network inspect {q(name)} >/dev/null 2>&1 && echo ok")
    if exists == "ok":
        return
    run_cmd(f"docker network create {q(name)}", ignore_errors=True)
def image_ref(prefix: str, image: str) -> str:
    """Return *image* optionally namespaced under mirror *prefix* ('' = no prefix)."""
    if prefix:
        return f"{prefix.rstrip('/')}/{image}"
    return image
def mask_password(pwd: str, keep=3):
    """Mask *pwd* for display, keeping at most *keep* chars on each end.

    Short passwords (len <= 2*keep) show only their first character.
    """
    if not pwd:
        return ""
    if len(pwd) > keep * 2:
        return f"{pwd[:keep]}***{pwd[-keep:]}"
    return pwd[0] + "***"
def normalize_site_id(site_id: str) -> str:
    """Trim whitespace and collapse every run of characters outside
    [a-zA-Z0-9._-] into one underscore (domain-like ids pass through)."""
    cleaned = site_id.strip()
    return re.sub(r"[^a-zA-Z0-9._-]+", "_", cleaned)
def safe_int(s: str, default=-1):
    """Parse *s* as int, returning *default* on invalid input.

    Catches only TypeError/ValueError (the errors int() raises for bad
    input) instead of a blanket Exception, so genuine bugs surface.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return default
# ================= Site info read/write =================
def write_site_info(site_dir, info):
    # Persist site metadata as pretty-printed UTF-8 JSON with trailing newline.
    payload = json.dumps(info, ensure_ascii=False, indent=2)
    write_file(os.path.join(site_dir, ".site_info.json"), payload + "\n")
def read_site_info(site_dir):
    """Load .site_info.json from *site_dir*; {} when missing or unparseable."""
    raw = read_file(os.path.join(site_dir, ".site_info.json"))
    if raw:
        try:
            return json.loads(raw)
        except Exception:
            pass
    return {}
def extract_db_info_from_compose(compose_path):
    """Scrape DB credentials out of a site's docker-compose.yml.

    Returns a dict with any of db_password/db_name/db_user found
    ({} when the file is missing); only the first occurrence of each
    key is kept, matching the old elif-chain priority.
    """
    content = read_file(compose_path)
    if not content:
        return {}
    # compose env var -> info key, in priority order; first match wins.
    env_to_field = {
        "MYSQL_ROOT_PASSWORD": "db_password",
        "MYSQL_DATABASE": "db_name",
        "MYSQL_USER": "db_user",
    }
    info = {}
    for line in content.splitlines():
        for env_key, field in env_to_field.items():
            if env_key in line and field not in info and ":" in line:
                # Split on the FIRST ':' only, so values containing ':' survive
                # (the old split(':')[-1] truncated such values).
                info[field] = line.split(":", 1)[1].strip().replace('"', "")
                break
    return info
def ensure_site_info(site_dir):
    """Return (info, updated): site metadata backfilled from compose + defaults.

    Missing db_* fields are scraped from docker-compose.yml (legacy sites);
    db_host/site_id/upstream/created_at get defaults. Persists when changed.
    """
    info = read_site_info(site_dir)
    compose_path = os.path.join(site_dir, "docker-compose.yml")
    extracted = extract_db_info_from_compose(compose_path) if has_marker(compose_path) else {}
    updated = False
    for key in ("db_name", "db_user", "db_password"):
        if not info.get(key) and extracted.get(key):
            info[key] = extracted[key]
            updated = True
    # Defaults below only apply when some metadata already exists (info truthy).
    if info and not info.get("db_host"):
        info["db_host"] = "db"
        updated = True
    if info and not info.get("site_id"):
        info["site_id"] = os.path.basename(site_dir)
        updated = True
    # upstream host name for NPM
    if info and not info.get("upstream"):
        info["upstream"] = f"{os.path.basename(site_dir)}_nginx"
        updated = True
    if info and not info.get("created_at"):
        info["created_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
        updated = True
    if updated:
        write_site_info(site_dir, info)
    return info, updated
# ================= DNS / compose utilities =================
def format_dns_block(dns_servers):
    # Render a YAML `dns:` list indented for a compose service definition.
    lines = ["    dns:"]
    for server in dns_servers:
        lines.append(f"      - {server}")
    return "\n".join(lines)
def add_dns_to_compose(compose_path, services, dns_servers):
    """Insert a `dns:` block under each listed service that lacks one.

    Returns True when the compose file was modified and rewritten.
    """
    content = read_file(compose_path)
    if not content:
        return False
    dns_block = format_dns_block(dns_servers)
    lines = content.splitlines()
    updated_lines = []
    updated = False
    i = 0
    while i < len(lines):
        line = lines[i]
        updated_lines.append(line)
        stripped = line.strip()
        # A service header we care about: "name:" at service indent level.
        if stripped.endswith(":") and stripped[:-1] in services and line.startswith("  "):
            service_indent = line[: line.index(stripped)]
            next_indent = service_indent + "  "
            j = i + 1
            has_dns = False
            # Scan the service body for an existing dns: key; stop at dedent/blank.
            while j < len(lines):
                next_line = lines[j]
                if not next_line.startswith(next_indent) or next_line.strip() == "":
                    break
                if next_line.strip() == "dns:":
                    has_dns = True
                    break
                j += 1
            if not has_dns:
                # Insert directly after the service header line.
                updated_lines.extend(dns_block.splitlines())
                updated = True
        i += 1
    if updated:
        write_file(compose_path, "\n".join(updated_lines) + "\n")
    return updated
def clean_duplicate_dns(compose_path, services):
    """Remove duplicate `dns:` blocks (keeping the first) from listed services.

    Returns True when the compose file changed and was rewritten.
    """
    content = read_file(compose_path)
    if not content:
        return False
    lines = content.splitlines()
    updated_lines = []
    current_service = None
    dns_seen = 0
    skip_block = False
    skip_indent = 0
    for line in lines:
        stripped = line.strip()
        # Service header: indented at service level (not deeper) and ends with ':'.
        if line.startswith("  ") and not line.startswith("    ") and stripped.endswith(":"):
            current_service = stripped[:-1]
            # -1 marks services we leave alone.
            dns_seen = 0 if current_service in services else -1
            skip_block = False
        if skip_block:
            indent = len(line) - len(line.lstrip())
            if indent > skip_indent:
                # Still inside the duplicate dns block: drop the line.
                continue
            skip_block = False
        if current_service in services and stripped == "dns:" and line.startswith("    "):
            if dns_seen >= 1:
                # Duplicate: skip this `dns:` line and its indented children.
                skip_block = True
                skip_indent = len(line) - len(line.lstrip())
                continue
            dns_seen += 1
        updated_lines.append(line)
    updated = "\n".join(lines) != "\n".join(updated_lines)
    if updated:
        write_file(compose_path, "\n".join(updated_lines) + "\n")
    return updated
def check_dns_in_container(site_dir):
    """Probe outbound HTTPS/DNS from the site's php_fpm container.

    Returns True when api.wordpress.org answers HTTP 200 within the timeouts.
    """
    cmd = (
        "curl -s -o /dev/null -w '%{http_code}' "
        "--connect-timeout 3 --max-time 5 "
        "https://api.wordpress.org/plugins/info/1.2/"
    )
    output = get_output(
        f"cd {q(site_dir)} && docker compose exec -T php_fpm sh -c {q(cmd)}"
    )
    return output.strip() == "200"
# ================= imports utilities =================
def list_import_files():
    """Return import-candidate archives in IMPORT_DIR, reverse-sorted by name."""
    ensure_dir(IMPORT_DIR)
    patterns = ("*.tar.gz", "*.zip", "*.sql", "*.sql.gz")
    found = []
    for pattern in patterns:
        found += glob.glob(os.path.join(IMPORT_DIR, pattern))
    return sorted(found, reverse=True)
# ================= Core management class =================
class DockerOps:
    """Operations hub: init/install, core services, sites, caching, imports."""
    def __init__(self):
        # Refuse to run as root and prime sudo before touching anything.
        require_non_root()
        # Make sure the whole /data/docker_sites tree exists up front.
        ensure_dir(BASE_DIR)
        ensure_dir(CACHE_DIR)
        ensure_dir(BACKUP_DIR)
        ensure_dir(SCRIPTS_DIR)
        ensure_dir(PROXY_DIR)
        ensure_dir(FRPC_DIR)
        ensure_dir(IMPORT_DIR)
        self._image_prefix = ""  # current prefix mirror for image_ref
# ---------- 镜像策略 ----------
def _prefix_candidates_for_site(self):
# 站点:先 DaoCloud(prefix) -> DockerProxy(prefix) -> no-prefix
return [("docker.m.daocloud.io", "DaoCloud"),
("dockerproxy.com", "DockerProxy"),
("", "no-prefix")]
def _prefix_candidates_for_core(self):
    # Core services (NPM/FRP) use the same mirror fallback order as sites.
    return self._prefix_candidates_for_site()
def _try_pull_with_prefix(self, site_dir, compose_writer_func, candidates):
    """Try `docker compose up -d` under each mirror prefix until one works.

    candidates: list of (prefix, label) tuples, tried in order.
    compose_writer_func(prefix): rewrites docker-compose.yml for that prefix.
    Returns True on the first successful start, False when every candidate fails.
    """
    for prefix, label in candidates:
        self._image_prefix = prefix
        compose_writer_func(prefix)
        ok = run_cmd(f"cd {q(site_dir)} && docker compose up -d", ignore_errors=True)
        if ok:
            return True
        print(f"{Colors.WARNING}镜像拉取/启动失败,切换镜像策略重试: {label}{Colors.ENDC}")
    return False
# ---------- Initialisation ----------
def install_dependencies(self):
    """Step 1/5: apt-install the base tooling the script relies on."""
    print(f"{Colors.BLUE}>>> [1/5] 安装基础依赖...{Colors.ENDC}")
    # Repair a possibly broken docker.list first so apt-get update can't fail on it.
    sanitize_docker_repo()
    run_cmd(
        "apt-get update && apt-get install -y "
        "psmisc curl unzip jq ca-certificates gnupg lsb-release python3-pip "
        "wget gzip rsync"
    )
def clean_ports(self):
    """Step 2/5: free ports 80/81/443 and disable host apache2/nginx."""
    print(f"{Colors.WARNING}>>> [2/5] 清理端口冲突 (80/443)...{Colors.ENDC}")
    # NOTE(review): also kills port 81 (NPM admin) although the message says 80/443.
    run_cmd("fuser -k 80/tcp 81/tcp 443/tcp", ignore_errors=True)
    run_cmd("systemctl stop apache2 nginx", ignore_errors=True)
    run_cmd("systemctl disable apache2 nginx", ignore_errors=True)
def install_docker(self):
    """Step 3/5: install docker-ce from the Aliyun mirror and write daemon.json."""
    if shutil.which("docker"):
        print(f"{Colors.GREEN}✅ Docker 已安装,跳过安装步骤。{Colors.ENDC}")
        return
    print(f"{Colors.BLUE}>>> [3/5] 安装 Docker (阿里云 docker-ce 源)...{Colors.ENDC}")
    ensure_dir("/etc/apt/keyrings")
    run_cmd(
        "curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/debian/gpg "
        "| gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes"
    )
    codename = get_output("lsb_release -cs")
    # Debian 13 "trixie": fall back to the bookworm suite (same as sanitize_docker_repo).
    if codename == "trixie":
        codename = "bookworm"
    arch = get_output("dpkg --print-architecture") or "amd64"
    repo = (
        f"deb [arch={arch} signed-by=/etc/apt/keyrings/docker.gpg] "
        f"https://mirrors.aliyun.com/docker-ce/linux/debian {codename} stable\n"
    )
    write_file("/etc/apt/sources.list.d/docker.list", repo)
    if not run_cmd(
        "apt-get update && apt-get install -y docker-ce docker-ce-cli "
        "containerd.io docker-compose-plugin"
    ):
        print(f"{Colors.FAIL}❌ Docker 安装失败,请检查源配置后重试。{Colors.ENDC}")
        return
    # Registry mirrors + bounded json-file logging for every container.
    daemon_config = {
        "registry-mirrors": DAEMON_REGISTRY_MIRRORS,
        "max-concurrent-downloads": 10,
        "log-driver": "json-file",
        "log-opts": {"max-size": "10m", "max-file": "3"},
    }
    ensure_dir("/etc/docker")
    write_file("/etc/docker/daemon.json", json.dumps(daemon_config, indent=2) + "\n")
    # Restart only when a docker systemd unit actually exists.
    if shutil.which("systemctl"):
        docker_service = (
            os.path.exists("/lib/systemd/system/docker.service")
            or os.path.exists("/etc/systemd/system/docker.service")
        )
        if docker_service:
            run_cmd("systemctl restart docker", ignore_errors=True)
def setup_frp_config(self):
    """Step 4/5: interactively capture frpc.toml (paste, terminated by 'END')."""
    print(f"{Colors.BLUE}>>> [4/5] 配置 FRP 穿透...{Colors.ENDC}")
    ensure_dir(FRPC_DIR)
    config_path = os.path.join(FRPC_DIR, "frpc.toml")
    # Don't clobber an existing config without confirmation.
    if os.path.exists(config_path):
        print(f"{Colors.WARNING}检测到已存在配置文件。{Colors.ENDC}")
        choice = input("是否重新录入配置? (y/N): ").strip().lower()
        if choice != "y":
            return
    print(f"{Colors.CYAN}请粘贴 frpc.toml 内容 (输入 END 结束):{Colors.ENDC}")
    lines = []
    while True:
        try:
            line = input()
            if line.strip() == "END":
                break
            lines.append(line)
        except EOFError:
            # Ctrl-D also ends the paste.
            break
    write_file(config_path, "\n".join(lines) + "\n")
    print(f"{Colors.GREEN}✅ FRP 配置已保存。{Colors.ENDC}")
def deploy_core_services(self):
    """Step 5/5: bring up Nginx Proxy Manager and the FRP client on web_network."""
    print(f"{Colors.BLUE}>>> [5/5] 部署核心服务 (NPM & FRP)...{Colors.ENDC}")
    ensure_docker_network("web_network")
    def write_proxy_compose(prefix):
        # NPM: 80/443 public traffic, 81 admin UI.
        npm_yml = f"""services:
  app:
    image: '{image_ref(prefix, "jc21/nginx-proxy-manager:latest")}'
    container_name: proxy-app-1
    restart: unless-stopped
    ports: ['80:80', '81:81', '443:443']
    volumes: ['./data:/data', './letsencrypt:/etc/letsencrypt']
    networks: ['web_network']
networks:
  web_network: {{ external: true }}
"""
        write_file(os.path.join(PROXY_DIR, "docker-compose.yml"), npm_yml)
    def write_frp_compose(prefix):
        frp_yml = f"""services:
  frpc:
    image: {image_ref(prefix, "snowdreamtech/frpc:latest")}
    container_name: frpc-frpc-1
    restart: always
    volumes: ['./frpc.toml:/etc/frp/frpc.toml']
    networks: ['web_network']
networks:
  web_network: {{ external: true }}
"""
        write_file(os.path.join(FRPC_DIR, "docker-compose.yml"), frp_yml)
    ensure_dir(PROXY_DIR)
    ensure_dir(FRPC_DIR)
    print("正在启动 Nginx Proxy Manager...")
    ok_proxy = self._try_pull_with_prefix(
        PROXY_DIR, write_proxy_compose, self._prefix_candidates_for_core()
    )
    if not ok_proxy:
        print(f"{Colors.FAIL}❌ NPM 启动失败,请检查网络或镜像策略。{Colors.ENDC}")
        return
    print("正在启动 FRP Client...")
    ok_frp = self._try_pull_with_prefix(
        FRPC_DIR, write_frp_compose, self._prefix_candidates_for_core()
    )
    if not ok_frp:
        print(f"{Colors.FAIL}❌ FRP 启动失败,请检查网络或镜像策略。{Colors.ENDC}")
        return
    print(f"{Colors.GREEN}✅ 核心服务初始化完成!{Colors.ENDC}")
# ---------- Sites ----------
def download_wp_zh(self):
    # Download the latest zh_CN WordPress zip into the cache (only once).
    target = os.path.join(CACHE_DIR, "latest-zh_CN.zip")
    if not os.path.exists(target):
        print("正在下载 WordPress 中文包...")
        run_cmd(f"wget -O {q(target)} https://cn.wordpress.org/latest-zh_CN.zip", ignore_errors=True)
    return target
def _write_site_compose(self, site_dir, site_id, is_wordpress, db_name, db_user, db_pwd, prefix):
    """Write the site's docker-compose.yml (db + php_fpm + nginx, optional redis).

    *prefix* is a mirror prefix applied to every image reference
    ('' = rely on daemon.json registry-mirrors).
    """
    # Optional redis service, only for WordPress sites.
    redis_service = (
        f"""  redis:
    image: {image_ref(prefix, "redis:alpine")}
    restart: always
    networks:
      - internal_net
"""
        if is_wordpress
        else ""
    )
    redis_depends = "      - redis\n" if is_wordpress else ""
    php_image = image_ref(prefix, "wordpress:fpm-alpine") if is_wordpress else image_ref(prefix, "php:8.2-fpm-alpine")
    dns_block = format_dns_block(DEFAULT_DNS)
    compose_yml = f"""services:
  db:
    image: {image_ref(prefix, "mariadb:10.6")}
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: "{db_pwd}"
      MYSQL_DATABASE: "{db_name}"
      MYSQL_USER: "{db_user}"
      MYSQL_PASSWORD: "{db_pwd}"
    volumes:
      - ./db_data:/var/lib/mysql
    networks:
      - internal_net
{redis_service}  php_fpm:
    image: {php_image}
    restart: always
    depends_on:
      - db
{redis_depends}{dns_block}
    volumes:
      - ./www_root:/var/www/html
      - ./uploads.ini:/usr/local/etc/php/conf.d/uploads.ini
    networks:
      - internal_net
  nginx:
    image: {image_ref(prefix, "nginx:alpine")}
    container_name: "{site_id}_nginx"
    restart: always
{dns_block}
    volumes:
      - ./www_root:/var/www/html
      - ./nginx_conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx_conf/default.conf:/etc/nginx/conf.d/default.conf:ro
      - ./nginx_cache:/var/cache/nginx/wordpress
    networks:
      - internal_net
      - web_network
networks:
  web_network:
    external: true
  internal_net:
    driver: bridge
"""
    write_file(os.path.join(site_dir, "docker-compose.yml"), compose_yml)
def _write_site_nginx_conf(self, site_dir, is_wordpress):
    """Write nginx.conf + default.conf for the site.

    WordPress sites get fastcgi caching (5s TTL), wp-login rate limiting and
    an xmlrpc.php block; generic PHP sites get a minimal PHP-FPM passthrough.
    """
    if is_wordpress:
        nginx_conf = """user nginx;
worker_processes auto;
pid /var/run/nginx.pid;
events {
    worker_connections 4096;
    multi_accept on;
}
http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;
    open_file_cache max=10000 inactive=60s;
    open_file_cache_valid 120s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;
    gzip on;
    gzip_comp_level 5;
    gzip_min_length 1024;
    gzip_vary on;
    gzip_types text/plain text/css application/javascript application/json application/xml image/svg+xml;
    fastcgi_cache_path /var/cache/nginx/wordpress levels=1:2 keys_zone=WP:200m inactive=30m max_size=5g use_temp_path=off;
    fastcgi_cache_key $scheme$request_method$host$request_uri;
    fastcgi_cache_methods GET HEAD;
    fastcgi_cache_lock on;
    fastcgi_cache_lock_timeout 10s;
    fastcgi_cache_use_stale error timeout updating http_500;
    fastcgi_cache_background_update on;
    limit_req_zone $binary_remote_addr zone=logins:10m rate=10r/m;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    client_max_body_size 50M;
    include /etc/nginx/conf.d/*.conf;
}
"""
        default_conf = r"""server {
    listen 80;
    root /var/www/html;
    index index.php;
    set $skip_cache 0;
    if ($request_method = POST) { set $skip_cache 1; }
    if ($query_string != "") { set $skip_cache 1; }
    if ($request_uri ~* "/wp-admin/|/wp-login.php|/wp-json/|/xmlrpc.php") { set $skip_cache 1; }
    if ($http_cookie ~* "wordpress_logged_in|comment_author|wp-postpass|woocommerce_items_in_cart|woocommerce_cart_hash") { set $skip_cache 1; }
    location = /favicon.ico { log_not_found off; access_log off; }
    location = /robots.txt { allow all; log_not_found off; access_log off; }
    location ~* \.(css|js|jpg|jpeg|png|gif|ico|svg|webp|woff2?)$ {
        expires 5s;
        access_log off;
        add_header Cache-Control "public, max-age=5";
    }
    location / { try_files $uri $uri/ /index.php?$args; }
    location = /wp-login.php {
        limit_req zone=logins burst=20 nodelay;
        try_files $uri =404;
        fastcgi_pass php_fpm:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
    }
    location /wp-admin/ { try_files $uri $uri/ /wp-admin/index.php?$args; }
    location = /xmlrpc.php { deny all; }
    location ~ \.php$ {
        fastcgi_pass php_fpm:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_cache WP;
        fastcgi_cache_valid 200 301 302 5s;
        fastcgi_cache_valid 404 5s;
        fastcgi_cache_bypass $skip_cache;
        fastcgi_no_cache $skip_cache;
        add_header X-FastCGI-Cache $upstream_cache_status;
    }
}
"""
    else:
        nginx_conf = """user nginx;
worker_processes auto;
pid /var/run/nginx.pid;
events {
    worker_connections 2048;
    multi_accept on;
}
http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    gzip on;
    gzip_comp_level 5;
    gzip_min_length 1024;
    gzip_vary on;
    gzip_types text/plain text/css application/javascript application/json application/xml image/svg+xml;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    client_max_body_size 20M;
    include /etc/nginx/conf.d/*.conf;
}
"""
        default_conf = r"""server {
    listen 80;
    root /var/www/html;
    index index.php index.html;
    location / { try_files $uri $uri/ /index.php?$args; }
    location ~ \.php$ {
        fastcgi_pass php_fpm:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
    }
}
"""
    write_file(os.path.join(site_dir, "nginx_conf", "nginx.conf"), nginx_conf)
    write_file(os.path.join(site_dir, "nginx_conf", "default.conf"), default_conf)
def light_init_cache_cleanup(self, site_id):
    """Post-create cleanup for a new site (lightweight cache clear):

    - empty nginx_cache
    - restart only the site's nginx container (db/php untouched)

    Avoids the NPM default page / stale upstream response being cached on
    first access.
    """
    site_dir = os.path.join(BASE_DIR, site_id)
    cache_path = os.path.join(site_dir, "nginx_cache")
    if os.path.isdir(cache_path):
        run_cmd(f"rm -rf {q(cache_path)}/*", ignore_errors=True)
    run_cmd(f"cd {q(site_dir)} && docker compose restart nginx", ignore_errors=True)
def add_site(self):
    """Interactive flow: create and start a new WordPress / generic PHP site."""
    print(f"\n{Colors.HEADER}--- 新建站点 ---{Colors.ENDC}")
    raw_id = input("请输入站点标识(建议域名,如 kaixinit.cn): ").strip()
    if not raw_id:
        return
    site_id = normalize_site_id(raw_id)
    site_dir = os.path.join(BASE_DIR, site_id)
    if os.path.exists(site_dir):
        print(f"{Colors.FAIL}❌ 站点目录已存在!{Colors.ENDC}")
        pause()
        return
    print("请选择安装包类型:")
    print("1) WordPress (可选 Redis)")
    print("2) 其他 PHP 程序")
    pkg_choice = input("请选择 (1/2): ").strip()
    # Anything other than an explicit "2" is treated as WordPress.
    is_wordpress = (pkg_choice != "2")
    if is_wordpress:
        print(f"{Colors.CYAN}提示: 如安装 Redis 插件,请站点安装完成后用菜单「Redis 优化」写入配置。{Colors.ENDC}")
    dirs = ["db_data", "redis_data", "www_root", "nginx_conf", "nginx_cache"]
    ensure_dir(site_dir)
    for d in dirs:
        ensure_dir(os.path.join(site_dir, d))
    # Choose the install package (cached file or fresh WP download).
    ensure_dir(CACHE_DIR)
    cache_files = [f for f in os.listdir(CACHE_DIR) if os.path.isfile(os.path.join(CACHE_DIR, f))]
    if cache_files:
        print("可用安装包缓存:")
        for name in sorted(cache_files):
            print(f" - {name}")
    if is_wordpress:
        zip_name = input("请输入安装包文件名(回车=下载最新WP中文包): ").strip()
    else:
        zip_name = input("请输入安装包文件名: ").strip()
    if zip_name:
        zip_path = os.path.join(CACHE_DIR, zip_name)
        if not os.path.exists(zip_path):
            print(f"{Colors.FAIL}❌ 安装包不存在: {zip_name}{Colors.ENDC}")
            pause()
            return
    else:
        if not is_wordpress:
            print(f"{Colors.FAIL}❌ 非 WordPress 程序必须指定安装包。{Colors.ENDC}")
            pause()
            return
        zip_path = self.download_wp_zh()
    print("解压安装包...")
    temp_dir = os.path.join(site_dir, "temp")
    ensure_dir(temp_dir)
    run_cmd(f"unzip -oq {q(zip_path)} -d {q(temp_dir)}", ignore_errors=True)
    # WP zips contain a top-level wordpress/ dir; flatten either layout into www_root.
    if os.path.exists(os.path.join(temp_dir, "wordpress")):
        run_cmd(f"mv {q(os.path.join(temp_dir,'wordpress'))}/* {q(os.path.join(site_dir,'www_root'))}/", ignore_errors=True)
    else:
        run_cmd(f"mv {q(temp_dir)}/* {q(os.path.join(site_dir,'www_root'))}/", ignore_errors=True)
    remove_tree(temp_dir)
    # Database credentials (defaults derived from the site id).
    default_db_name = site_id.replace(".", "_")
    default_db_user = site_id.replace(".", "_")
    default_db_pwd = generate_password()
    db_name = input(f"数据库名 (默认 {default_db_name}): ").strip() or default_db_name
    db_user = input(f"数据库用户 (默认 {default_db_user}): ").strip() or default_db_user
    db_pwd = input("数据库密码 (默认自动生成): ").strip() or default_db_pwd
    db_host = "db"
    upstream = f"{site_id}_nginx"
    write_site_info(site_dir, {
        "site_id": site_id,
        "db_name": db_name,
        "db_user": db_user,
        "db_password": db_pwd,
        "db_host": db_host,
        "upstream": upstream,
        "created_at": time.strftime("%Y-%m-%d %H:%M:%S"),
    })
    # PHP upload limits
    write_file(
        os.path.join(site_dir, "uploads.ini"),
        "upload_max_filesize = 50M\npost_max_size = 50M\nmemory_limit = 256M\n"
    )
    # nginx config
    self._write_site_nginx_conf(site_dir, is_wordpress)
    # compose: mirror strategy (DaoCloud prefix -> DockerProxy prefix -> no-prefix)
    def compose_writer(prefix):
        self._write_site_compose(site_dir, site_id, is_wordpress, db_name, db_user, db_pwd, prefix)
    print("启动容器中...")
    ok = self._try_pull_with_prefix(site_dir, compose_writer, self._prefix_candidates_for_site())
    if not ok:
        # Mark failed so the site is hidden from the normal listing.
        set_marker(os.path.join(site_dir, ".failed"), "compose_up_failed\n")
        print(f"{Colors.FAIL}❌ 站点未配置成功,已标记失败,不会出现在清单。{Colors.ENDC}")
        pause()
        return
    # Remind about the WP web installer when wp-config.php doesn't exist yet.
    if is_wordpress:
        time.sleep(2)
        wp_config = os.path.join(site_dir, "www_root", "wp-config.php")
        if not has_marker(wp_config):
            print(f"{Colors.WARNING}⚠️ WordPress 尚未完成安装:请先在 NPM 添加 Proxy Host 后访问域名完成安装。{Colors.ENDC}")
    # Fix ownership for the php-fpm/nginx container uids.
    self.fix_perms(site_id)
    # Final step for a new site: lightweight cache cleanup.
    self.light_init_cache_cleanup(site_id)
    print(f"\n{Colors.GREEN}✅ 站点 {site_id} 部署成功!{Colors.ENDC}")
    print(f"🗂 数据库名: {db_name}")
    print(f"👤 数据库用户: {db_user}")
    print(f"🔑 数据库密码: {db_pwd}")
    print(f"🧭 数据库主机: {db_host}")
    print(f"🔗 NPM Upstream(转发主机名): {Colors.BOLD}{upstream}{Colors.ENDC}")
    print(f"NPM 管理地址: http://{get_primary_ip()}:81")
    print(f"{Colors.WARNING}提示: 访问域名若是默认页,请在 NPM 添加 Proxy Host 指向 Upstream。{Colors.ENDC}")
    pause()
def fix_perms(self, specific_site=None):
    """Fix ownership/permissions for one site, or all sites when none given.

    php-fpm alpine typically runs as uid 82 and nginx alpine as uid 101, so
    www_root / nginx_cache are chowned accordingly; dirs 755, files 644.
    """
    sites = [specific_site] if specific_site else self.get_site_list(include_pending=True, include_failed=True)
    for site in sites:
        path = os.path.join(BASE_DIR, site)
        if not os.path.isdir(path):
            continue
        print(f"正在修复权限: {site}")
        # Make sure the WP upgrade dir and cache dir exist before chowning.
        ensure_dir(os.path.join(path, "www_root", "wp-content", "upgrade"))
        ensure_dir(os.path.join(path, "nginx_cache"))
        run_cmd(f"chown -R 82:82 {q(os.path.join(path,'www_root'))}", ignore_errors=True)
        run_cmd(f"chown -R 101:101 {q(os.path.join(path,'nginx_cache'))}", ignore_errors=True)
        run_cmd(f"find {q(os.path.join(path,'www_root'))} -type d -exec chmod 755 {{}} \\;", ignore_errors=True)
        run_cmd(f"find {q(os.path.join(path,'www_root'))} -type f -exec chmod 644 {{}} \\;", ignore_errors=True)
# ---------- Site listing (pretty) ----------
def get_site_list(self, include_pending=False, include_failed=False):
    """Return sorted site ids that have a docker-compose.yml.

    include_failed: also keep sites bearing a .failed marker.
    NOTE(review): include_pending is accepted but currently unused.
    """
    exclude = ["proxy", "frpc", "cache", "scripts", "backups", "imports"]
    sites = []
    try:
        entries = os.listdir(BASE_DIR)
    except Exception:
        # BASE_DIR may not be readable by this user; fall back to sudo ls.
        entries = get_output(f"ls -1 {q(BASE_DIR)}").splitlines()
    for d in entries:
        if not d or d in exclude:
            continue
        site_dir = os.path.join(BASE_DIR, d)
        if not os.path.isdir(site_dir):
            continue
        compose_path = os.path.join(site_dir, "docker-compose.yml")
        if not has_marker(compose_path):
            continue
        failed_marker = os.path.join(site_dir, ".failed")
        if has_marker(failed_marker) and not include_failed:
            continue
        sites.append(d)
    return sorted(sites)
def get_all_site_dirs(self):
    """Return every site-like directory under BASE_DIR, even unconfigured ones."""
    exclude = ["proxy", "frpc", "cache", "scripts", "backups", "imports"]
    dirs = []
    try:
        entries = os.listdir(BASE_DIR)
    except Exception:
        # Fall back to sudo ls when BASE_DIR is not directly readable.
        entries = get_output(f"ls -1 {q(BASE_DIR)}").splitlines()
    for d in entries:
        if not d or d in exclude:
            continue
        site_dir = os.path.join(BASE_DIR, d)
        if os.path.isdir(site_dir):
            dirs.append(d)
    return sorted(dirs)
def print_sites_table(self, sites):
    """Render an aligned ASCII table of sites and print it via `sudo cat`.

    Columns: # | SiteID | Status | DB_Name | DB_User | DB_Pass (masked) | Upstream
    """
    rows = []
    for idx, site in enumerate(sites, 1):
        site_dir = os.path.join(BASE_DIR, site)
        status = "OK"
        if has_marker(os.path.join(site_dir, ".failed")):
            status = "FAILED"
        info, _ = ensure_site_info(site_dir)
        db_name = info.get("db_name", "Unknown")
        db_user = info.get("db_user", "Unknown")
        db_pwd = mask_password(info.get("db_password", ""), keep=3) or "Unknown"
        upstream = info.get("upstream", f"{site}_nginx")
        rows.append([str(idx), site, status, db_name, db_user, db_pwd, upstream])
    headers = ["#", "SiteID", "Status", "DB_Name", "DB_User", "DB_Pass", "Upstream"]
    # Column widths: max of header and every cell in that column.
    widths = [len(h) for h in headers]
    for r in rows:
        for i, v in enumerate(r):
            widths[i] = max(widths[i], len(str(v)))
    def line(sep="+", fill="-"):
        # Horizontal rule sized to the computed column widths.
        return sep + sep.join(fill * (w + 2) for w in widths) + sep
    def fmt_row(cols):
        out = "|"
        for i, c in enumerate(cols):
            s = str(c)
            out += " " + s.ljust(widths[i]) + " |"
        return out
    table = []
    table.append(line())
    table.append(fmt_row(headers))
    table.append(line())
    for r in rows:
        table.append(fmt_row(r))
    table.append(line())
    content = "\n".join(table) + "\n"
    # Write to /tmp and `cat` through sudo so the output is plainly visible.
    tmp = f"/tmp/sites_table_{int(time.time())}.txt"
    write_file(tmp, content)
    run_cmd(f"cat {q(tmp)}", ignore_errors=True)
    run_cmd(f"rm -f {q(tmp)}", ignore_errors=True)
def show_site_detail(self, site_id):
    """Print a multi-line detail view (with the unmasked DB password)."""
    site_dir = os.path.join(BASE_DIR, site_id)
    info, _ = ensure_site_info(site_dir)
    status = "OK" if not has_marker(os.path.join(site_dir, ".failed")) else "FAILED"
    db_pwd = info.get("db_password", "")
    detail = []
    detail.append(f"{Colors.HEADER}================ 站点详情 ================{Colors.ENDC}")
    detail.append(f"站点ID : {site_id}")
    detail.append(f"状态 : {status}")
    detail.append(f"创建时间 : {info.get('created_at','-')}")
    detail.append("")
    detail.append("---- 数据库信息 ----")
    detail.append(f"DB 名称 : {info.get('db_name','-')}")
    detail.append(f"DB 用户 : {info.get('db_user','-')}")
    detail.append(f"DB 密码 : {db_pwd or '-'}")
    detail.append(f"DB 主机 : {info.get('db_host','db')}")
    detail.append("")
    detail.append("---- NPM 转发 ----")
    detail.append(f"Upstream : {info.get('upstream', f'{site_id}_nginx')}")
    detail.append("")
    detail.append("---- 路径 ----")
    detail.append(f"站点目录 : {site_dir}")
    detail.append(f"WP 根目录 : {os.path.join(site_dir,'www_root')}")
    detail.append(f"{Colors.HEADER}=========================================={Colors.ENDC}")
    text = "\n".join(detail) + "\n"
    # Same cat-via-tmpfile trick as the table view.
    tmp = f"/tmp/site_detail_{int(time.time())}.txt"
    write_file(tmp, text)
    run_cmd(f"cat {q(tmp)}", ignore_errors=True)
    run_cmd(f"rm -f {q(tmp)}", ignore_errors=True)
def list_sites_menu(self):
    """Show the sites table, then optionally drill into one site's details."""
    print(f"\n{Colors.HEADER}--- 站点列表 (输入编号查看详情) ---{Colors.ENDC}")
    sites = self.get_all_site_dirs()
    if not sites:
        print(f"{Colors.WARNING}暂无站点。{Colors.ENDC}")
        pause()
        return
    self.print_sites_table(sites)
    choice = input("输入编号查看详情(回车返回): ").strip()
    if not choice:
        return
    # 1-based menu input -> 0-based index; safe_int rejects junk.
    idx = safe_int(choice, -1) - 1
    if idx < 0 or idx >= len(sites):
        print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
        pause()
        return
    self.show_site_detail(sites[idx])
    pause()
# ---------- Cache ----------
def clear_site_cache(self, site_id):
    """Wipe the site's fastcgi cache dir and restart its nginx container."""
    path = os.path.join(BASE_DIR, site_id, "nginx_cache")
    if os.path.isdir(path):
        run_cmd(f"rm -rf {q(path)}/*", ignore_errors=True)
    run_cmd(f"cd {q(os.path.join(BASE_DIR, site_id))} && docker compose restart nginx", ignore_errors=True)
    print(f"{Colors.GREEN}✅ 已清理缓存并重启 Nginx: {site_id}{Colors.ENDC}")
def cache_menu(self):
    """Interactive menu: pick a site and clear its nginx cache."""
    print(f"\n{Colors.HEADER}--- 清理缓存 ---{Colors.ENDC}")
    sites = self.get_site_list(include_pending=True, include_failed=True)
    if not sites:
        print(f"{Colors.WARNING}未找到站点。{Colors.ENDC}")
        pause()
        return
    for i, site in enumerate(sites, 1):
        print(f"{i:<4} {site}")
    choice = input("请输入编号(回车取消): ").strip()
    if not choice:
        return
    # 1-based menu input -> 0-based index.
    idx = safe_int(choice, -1) - 1
    if idx < 0 or idx >= len(sites):
        print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
        pause()
        return
    self.clear_site_cache(sites[idx])
    pause()
def update_cache_ttl(self, site_id, seconds):
    """Set every cache TTL in the site's default.conf to *seconds* and restart nginx.

    Rewrites `expires`, the static Cache-Control max-age, and both
    fastcgi_cache_valid directives. Returns True when something changed.
    """
    conf_path = os.path.join(BASE_DIR, site_id, "nginx_conf", "default.conf")
    content = read_file(conf_path)
    if not content:
        print(f"{Colors.WARNING}跳过 {site_id}: 未找到 default.conf{Colors.ENDC}")
        return False
    updated = content
    updated = re.sub(r"expires\s+\d+[smhd];", f"expires {seconds}s;", updated)
    updated = re.sub(r'Cache-Control "public, max-age=\d+"', f'Cache-Control "public, max-age={seconds}"', updated)
    updated = re.sub(r"fastcgi_cache_valid\s+200\s+301\s+302\s+\d+[smhd];", f"fastcgi_cache_valid 200 301 302 {seconds}s;", updated)
    updated = re.sub(r"fastcgi_cache_valid\s+404\s+\d+[smhd];", f"fastcgi_cache_valid 404 {seconds}s;", updated)
    if updated == content:
        print(f"{Colors.WARNING}跳过 {site_id}: 未匹配到缓存配置{Colors.ENDC}")
        return False
    write_file(conf_path, updated)
    run_cmd(f"cd {q(os.path.join(BASE_DIR, site_id))} && docker compose restart nginx", ignore_errors=True)
    print(f"{Colors.GREEN}✅ 已更新缓存时间并重启 Nginx: {site_id}{Colors.ENDC}")
    return True
def cache_ttl_menu(self):
    """Interactive menu: set the cache TTL (seconds) for one site or all sites."""
    print(f"\n{Colors.HEADER}--- 批量调整缓存时间 ---{Colors.ENDC}")
    print(f"{Colors.CYAN}当前默认静态/fastcgi 缓存是 5s(新站点配置){Colors.ENDC}")
    sites = self.get_site_list(include_pending=True, include_failed=True)
    if not sites:
        print(f"{Colors.WARNING}未找到站点。{Colors.ENDC}")
        pause()
        return
    raw_ttl = input("请输入缓存时间(秒): ").strip()
    if not raw_ttl.isdigit():
        print(f"{Colors.WARNING}请输入有效数字秒数。{Colors.ENDC}")
        pause()
        return
    ttl = int(raw_ttl)
    if ttl < 1:
        print(f"{Colors.WARNING}秒数需大于 0。{Colors.ENDC}")
        pause()
        return
    for num, name in enumerate(sites, 1):
        print(f"{num:<4} {name}")
    picked = input("请输入编号(回车=全部): ").strip()
    if picked:
        pos = safe_int(picked, -1) - 1
        if not (0 <= pos < len(sites)):
            print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
            pause()
            return
        self.update_cache_ttl(sites[pos], ttl)
    else:
        # Blank input applies the new TTL to every listed site.
        for name in sites:
            self.update_cache_ttl(name, ttl)
    pause()
def redis_optimize(self):
    """Write WP_REDIS_HOST/WP_REDIS_PORT defines into wp-config.php.

    Applies to one site (when an ID is typed) or to all sites (blank input).
    Sites without wp-config.php or already containing WP_REDIS_HOST are
    skipped; a missing Redis plugin only produces a warning.
    """
    print(f"\n{Colors.HEADER}--- Redis 优化 (写入 WP 配置) ---{Colors.ENDC}")
    target = input("输入站点 ID (回车=全部): ").strip()
    # Blank input means "all sites", including pending/failed ones.
    sites = [target] if target else self.get_site_list(include_pending=True, include_failed=True)
    for site in sites:
        site_root = os.path.join(BASE_DIR, site, "www_root")
        wp_config = os.path.join(site_root, "wp-config.php")
        content = read_file(wp_config)
        if not content:
            print(f"{Colors.WARNING}跳过 {site}: 未找到 wp-config.php{Colors.ENDC}")
            continue
        # Known Redis object-cache plugin directories; absence is a warning,
        # not a stop — the defines are still written.
        plugin_paths = [
            os.path.join(site_root, "wp-content", "plugins", "redis-cache"),
            os.path.join(site_root, "wp-content", "plugins", "wp-redis"),
        ]
        if not any(os.path.isdir(p) for p in plugin_paths):
            print(f"{Colors.WARNING}提示 {site}: 未检测到 Redis 插件 (redis-cache/wp-redis){Colors.ENDC}")
        if "WP_REDIS_HOST" in content:
            print(f"{Colors.CYAN}{site} 已配置 Redis,跳过。{Colors.ENDC}")
            continue
        # 'redis' resolves to the redis service on the site's compose network.
        redis_config = "\ndefine('WP_REDIS_HOST', 'redis');\ndefine('WP_REDIS_PORT', 6379);\n"
        # Insert before WordPress's "stop editing" marker when present,
        # otherwise append at the end of the file.
        marker = "/* That's all, stop editing"
        if marker in content:
            content = content.replace(marker, redis_config + marker)
        else:
            content += redis_config
        write_file(wp_config, content)
        print(f"{Colors.GREEN}✅ 已为 {site} 写入 Redis 配置。{Colors.ENDC}")
    pause()
# ---------- 备份/恢复 ----------
def backup_sites(self):
    """Back up every non-failed site into BACKUP_DIR as <site>_<date>.tar.gz.

    For each site: dump its MySQL database to db.sql inside the site
    directory (when a db container and stored root password exist), tar the
    whole site directory, then delete the transient db.sql.
    """
    print(f"\n{Colors.HEADER}--- 全站备份 ---{Colors.ENDC}")
    ensure_dir(BACKUP_DIR)
    date_str = time.strftime("%Y%m%d_%H%M")
    sites = self.get_site_list(include_pending=True, include_failed=False)
    for site in sites:
        print(f"正在备份 {site} ...")
        site_path = os.path.join(BASE_DIR, site)
        db_sql = os.path.join(site_path, "db.sql")
        try:
            db_container = get_output(f"cd {q(site_path)} && docker compose ps -q db")
            info, _ = ensure_site_info(site_path)
            db_name = info.get("db_name", site.replace(".","_"))
            db_pwd = info.get("db_password", "")
            if db_container and db_pwd:
                # Dump the DB into the site dir so it lands inside the tarball.
                run_cmd(f"docker exec {db_container} mysqldump -uroot -p{q(db_pwd)} {q(db_name)} > {q(db_sql)}",
                        ignore_errors=False)
            tar_name = f"{site}_{date_str}.tar.gz"
            # -C BASE_DIR keeps archive paths relative to the sites root.
            run_cmd(f"tar -czf {q(os.path.join(BACKUP_DIR, tar_name))} -C {q(BASE_DIR)} {q(site)}", ignore_errors=False)
            if os.path.exists(db_sql):
                run_cmd(f"rm -f {q(db_sql)}", ignore_errors=True)
            print(f"{Colors.GREEN}✅ {site} 备份完成: {tar_name}{Colors.ENDC}")
        except Exception as e:
            # One failing site must not abort the remaining backups.
            print(f"{Colors.FAIL}❌ {site} 备份失败: {e}{Colors.ENDC}")
    pause()
def restore_site_from_backup(self):
    """Restore a whole site from a backups/*.tar.gz archive.

    Archives are named ``<site_id>_<YYYYMMDD>_<HHMM>.tar.gz`` (see
    backup_sites), so the site id is recovered by stripping the final two
    underscore-separated tokens. Unlike the previous ``split("_")[0]``,
    this also works for site ids that themselves contain underscores.
    """
    print(f"\n{Colors.HEADER}--- 从 backups 一键恢复(整站) ---{Colors.ENDC}")
    backups = sorted(glob.glob(os.path.join(BACKUP_DIR, "*.tar.gz")), reverse=True)
    if not backups:
        print("没有找到备份文件。")
        pause()
        return
    for i, b in enumerate(backups, 1):
        print(f"{i:<4} {os.path.basename(b)}")
    idx = safe_int(input("请选择备份文件编号: ").strip(), -1) - 1
    if idx < 0 or idx >= len(backups):
        return
    backup_file = backups[idx]
    # Strip ".tar.gz", then drop the trailing <date>_<time> tokens.
    stem = os.path.basename(backup_file)[:-len(".tar.gz")]
    parts = stem.rsplit("_", 2)
    # Fall back to the full stem if the name lacks the date/time suffix.
    site_id = parts[0] if len(parts) == 3 else stem
    print(f"{Colors.CYAN}正在恢复 {site_id} ...{Colors.ENDC}")
    run_cmd(f"tar -xzf {q(backup_file)} -C {q(BASE_DIR)}", ignore_errors=True)
    run_cmd(f"cd {q(os.path.join(BASE_DIR, site_id))} && docker compose up -d", ignore_errors=True)
    self.fix_perms(site_id)
    # Avoid serving a stale NPM default page on first hit.
    self.light_init_cache_cleanup(site_id)
    print(f"{Colors.GREEN}✅ 恢复完成:{site_id}{Colors.ENDC}")
    pause()
def delete_site(self):
    """Destroy a site irreversibly: optional backup, confirmation,
    ``docker compose down -v`` (also removes volumes), then delete files.
    """
    sites = self.get_all_site_dirs()
    print(f"\n{Colors.HEADER}--- 销毁站点 ---{Colors.ENDC}")
    if not sites:
        print("暂无站点。")
        pause()
        return
    for i, site in enumerate(sites, 1):
        status = "OK"
        # A ".failed" marker flags sites whose setup did not complete.
        if has_marker(os.path.join(BASE_DIR, site, ".failed")):
            status = "FAILED"
        print(f"{i:<4} {site} ({status})")
    idx = safe_int(input(f"{Colors.WARNING}请输入要销毁的站点编号(不可逆): {Colors.ENDC}").strip(), -1) - 1
    if idx < 0 or idx >= len(sites):
        return
    site_id = sites[idx]
    site_path = os.path.join(BASE_DIR, site_id)
    backup_choice = input("删除前先备份? (y/N): ").strip().lower()
    if backup_choice == "y":
        # Same flow as backup_sites, but with errors ignored so a failed
        # dump cannot block the deletion the user asked for.
        ensure_dir(BACKUP_DIR)
        date_str = time.strftime("%Y%m%d_%H%M")
        db_sql = os.path.join(site_path, "db.sql")
        try:
            db_container = get_output(f"cd {q(site_path)} && docker compose ps -q db")
            info, _ = ensure_site_info(site_path)
            db_name = info.get("db_name", site_id.replace(".","_"))
            db_pwd = info.get("db_password","")
            if db_container and db_pwd:
                run_cmd(f"docker exec {db_container} mysqldump -uroot -p{q(db_pwd)} {q(db_name)} > {q(db_sql)}",
                        ignore_errors=True)
            tar_name = f"{site_id}_{date_str}.tar.gz"
            run_cmd(f"tar -czf {q(os.path.join(BACKUP_DIR, tar_name))} -C {q(BASE_DIR)} {q(site_id)}", ignore_errors=True)
            run_cmd(f"rm -f {q(db_sql)}", ignore_errors=True)
            print(f"{Colors.GREEN}✅ 备份完成: {tar_name}{Colors.ENDC}")
        except Exception as e:
            print(f"{Colors.FAIL}❌ 备份失败: {e}{Colors.ENDC}")
    confirm = input(f"确认删除 {site_id}? (y/N): ").strip().lower()
    if confirm != "y":
        print("已取消。")
        time.sleep(1)
        return
    print("停止并移除容器...")
    # down -v also deletes named volumes (i.e. the database data).
    run_cmd(f"cd {q(site_path)} && docker compose down -v", ignore_errors=True)
    print("删除文件...")
    remove_tree(site_path)
    print(f"{Colors.GREEN}✅ 已销毁:{site_id}{Colors.ENDC}")
    pause()
# ---------- 系统/急救/上线 ----------
def monitor(self):
    """Display host memory usage and a one-shot Docker container stats table."""
    run_cmd("clear")
    sections = [
        ("--- 系统资源监控 ---", "free -h"),
        ("\n--- Docker 统计 ---", "docker stats --no-stream --format 'table {{.Name}}\\t{{.CPUPerc}}\\t{{.MemUsage}}'"),
    ]
    for title, command in sections:
        print(title)
        run_cmd(command, ignore_errors=True)
    pause()
def download_cache(self):
    """List cached installer packages and optionally download a new one with curl."""
    ensure_dir(CACHE_DIR)
    run_cmd("clear")
    separator = "----------------------------------------------------"
    print(separator)
    print("安装包缓存管理")
    try:
        entries = sorted(os.listdir(CACHE_DIR))
    except Exception:
        entries = []
    if entries:
        for entry in entries:
            full_path = os.path.join(CACHE_DIR, entry)
            if os.path.isfile(full_path):
                print(f" {entry} ({os.path.getsize(full_path)} bytes)")
    else:
        print(" (暂无缓存)")
    print(separator)
    if input("下载新包? (y/n): ").strip().lower() != "y":
        return
    url = input("URL: ").strip()
    if not url:
        return
    # Default the local filename to the last URL path segment.
    fallback = os.path.basename(url)
    file_name = input(f"文件名 (默认 {fallback}): ").strip() or fallback
    destination = os.path.join(CACHE_DIR, file_name)
    print("下载中...")
    if run_cmd(f"curl -L -o {q(destination)} {q(url)}", ignore_errors=True):
        print("下载完成。")
    pause()
def deep_fix(self):
    """Emergency access repair: free ports from host apache/nginx, reattach
    all containers to web_network, restart NPM and FRP, and lock
    restart=always on every container.
    """
    run_cmd("clear")
    print("----------------------------------------------------")
    print("访问修复(急救) & 自启锁定")
    print("----------------------------------------------------")
    choice = input("确认执行修复? (y/n): ").strip().lower()
    if choice != "y":
        return
    print("清理端口占用...")
    # A host-level apache/nginx would shadow the dockerized proxy on 80/443.
    run_cmd("systemctl stop apache2 nginx", ignore_errors=True)
    run_cmd("systemctl disable apache2 nginx", ignore_errors=True)
    run_cmd("killall apache2 nginx", ignore_errors=True)
    print("检查网络...")
    ensure_docker_network("web_network")
    # Connect errors ("already connected") are expected and ignored.
    run_cmd("docker network connect web_network frpc-frpc-1", ignore_errors=True)
    run_cmd("docker network connect web_network proxy-app-1", ignore_errors=True)
    for site in self.get_site_list(include_pending=True, include_failed=True):
        run_cmd(f"docker network connect web_network {q(site + '_nginx')}", ignore_errors=True)
    print("刷新网关...")
    run_cmd("docker restart proxy-app-1", ignore_errors=True)
    time.sleep(2)  # give NPM a moment before bouncing the tunnel client
    print("重连穿透...")
    run_cmd("docker restart frpc-frpc-1", ignore_errors=True)
    print("锁定自启...")
    run_cmd("docker update --restart=always $(docker ps -a -q)", ignore_errors=True)
    print("修复完成。")
    pause()
def backup_online(self):
    """Bring a standby machine online in one shot: free ports, start every
    compose project under BASE_DIR, attach containers to web_network,
    restart NPM/FRP, and lock restart=always.
    """
    run_cmd("clear")
    print("----------------------------------------------------")
    print("备用机上线(一键)")
    print("----------------------------------------------------")
    print("将执行:清端口 -> 启动所有compose -> 挂载web_network -> 重启NPM/FRP -> 锁定自启")
    choice = input("确认执行上线操作? (y/n): ").strip().lower()
    if choice != "y":
        return
    print("清理端口占用...")
    run_cmd("systemctl stop apache2 nginx", ignore_errors=True)
    run_cmd("systemctl disable apache2 nginx", ignore_errors=True)
    run_cmd("killall apache2 nginx", ignore_errors=True)
    print("创建网络环境...")
    ensure_docker_network("web_network")
    print("启动所有站点容器...")
    # Walk BASE_DIR for compose projects; the two core services are
    # excluded here and started separately below.
    for root, _, files in os.walk(BASE_DIR):
        if "docker-compose.yml" in files and root not in (PROXY_DIR, FRPC_DIR):
            run_cmd(f"cd {q(root)} && docker compose up -d", ignore_errors=True)
    print("启动核心服务...")
    run_cmd(f"cd {q(PROXY_DIR)} && docker compose up -d", ignore_errors=True)
    run_cmd(f"cd {q(FRPC_DIR)} && docker compose up -d", ignore_errors=True)
    print("挂载网络连接...")
    run_cmd("docker network connect web_network frpc-frpc-1", ignore_errors=True)
    run_cmd("docker network connect web_network proxy-app-1", ignore_errors=True)
    for site in self.get_site_list(include_pending=True, include_failed=True):
        run_cmd(f"docker network connect web_network {q(site + '_nginx')}", ignore_errors=True)
    print("刷新服务与锁定自启...")
    run_cmd("docker restart proxy-app-1", ignore_errors=True)
    time.sleep(2)  # let NPM settle before restarting the tunnel client
    run_cmd("docker restart frpc-frpc-1", ignore_errors=True)
    run_cmd("docker update --restart=always $(docker ps -a -q)", ignore_errors=True)
    print("备用机上线完成。")
    pause()
# ---------- 导入:DB / WP 增量 / 整站 ----------
def _extract_import_to_tmp(self, import_file):
    """Extract a .tar.gz or .zip archive into a fresh temporary directory.

    Returns the temp directory path, or "" when the format is unsupported
    or extraction fails. Uses tempfile.mkdtemp so rapid successive calls
    can no longer collide on the previous second-resolution
    ``/tmp/wp_import_<epoch>`` path, and cleans up the directory when
    extraction fails instead of leaking it. The caller removes the
    returned directory when done.
    """
    import tempfile
    if not (import_file.endswith(".tar.gz") or import_file.endswith(".zip")):
        return ""
    tmp_dir = tempfile.mkdtemp(prefix="wp_import_")
    if import_file.endswith(".tar.gz"):
        ok = run_cmd(f"tar -xzf {q(import_file)} -C {q(tmp_dir)}", ignore_errors=True)
    else:
        ok = run_cmd(f"unzip -oq {q(import_file)} -d {q(tmp_dir)}", ignore_errors=True)
    if not ok:
        # Don't leave a half-extracted directory behind on failure.
        run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
        return ""
    return tmp_dir
def _find_wp_content_dir(self, root_dir):
    """Return the first wp-content directory (depth <= 6) under root_dir, or ""."""
    found = get_output(f"find {q(root_dir)} -maxdepth 6 -type d -name wp-content | head -n 1 || true")
    return found.strip()
def _find_db_sql_in_pkg(self, root_dir):
    """Return the first db.sql file (depth <= 6) under root_dir, or ""."""
    found = get_output(f"find {q(root_dir)} -maxdepth 6 -type f -name 'db.sql' | head -n 1 || true")
    return found.strip()
def _mysql_exec(self, site_id, sql):
    """Execute a SQL statement as root inside the site's db container.

    Returns False when the db container or the stored root password cannot
    be resolved; otherwise returns run_cmd's result (errors ignored).
    """
    site_dir = os.path.join(BASE_DIR, site_id)
    db_container = get_output(f"cd {q(site_dir)} && docker compose ps -q db").strip()
    if not db_container:
        return False
    info, _ = ensure_site_info(site_dir)
    db_pwd = info.get("db_password", "")
    if not db_pwd:
        return False
    # run inside db container
    cmd = f"docker exec -i {db_container} mysql -uroot -p{q(db_pwd)} -e {q(sql)}"
    return run_cmd(cmd, ignore_errors=True)
def _update_wp_domain(self, site_id, new_domain, scheme="http"):
    """
    Safe domain update: rewrites only the siteurl/home rows in wp_options.

    Returns True when the UPDATE ran (an empty new_domain is a no-op that
    also returns True), False on failure.
    NOTE(review): assumes the standard "wp_" table prefix; q() looks like a
    shell-quoting helper reused here as a SQL string quoter — confirm it
    emits single quotes that are valid inside MySQL.
    """
    if not new_domain:
        return True
    new_domain = new_domain.strip()
    new_url = f"{scheme}://{new_domain}"
    site_dir = os.path.join(BASE_DIR, site_id)
    info, _ = ensure_site_info(site_dir)
    db_name = info.get("db_name", site_id.replace(".","_"))
    sql = (
        f"USE `{db_name}`;"
        f"UPDATE wp_options SET option_value={q(new_url)} WHERE option_name IN ('siteurl','home');"
    )
    ok = self._mysql_exec(site_id, sql)
    if ok:
        print(f"{Colors.GREEN}✅ 已更新 WordPress 域名: {new_url}{Colors.ENDC}")
    else:
        print(f"{Colors.WARNING}⚠️ 域名更新失败(可能表前缀不是 wp_ 或数据库未就绪),你可稍后手动改。{Colors.ENDC}")
    return ok
def import_db_from_file(self, import_file, target_site_id, drop_and_recreate=False):
    """Import a database dump into an existing site's MySQL container.

    Supports .sql and .sql.gz files directly, and .tar.gz/.zip archives
    containing a db.sql.

    Args:
        import_file: path to the dump or archive.
        target_site_id: site directory name under BASE_DIR.
        drop_and_recreate: when True, drop and recreate the database
            (utf8mb4/utf8mb4_unicode_ci) before importing.

    Returns:
        True on success, False otherwise.
    """
    site_dir = os.path.join(BASE_DIR, target_site_id)
    info, _ = ensure_site_info(site_dir)
    db_name = info.get("db_name", target_site_id.replace(".", "_"))
    db_pwd = info.get("db_password", "")
    if not db_pwd:
        print(f"{Colors.FAIL}❌ 未找到 DB 密码,无法导入。{Colors.ENDC}")
        return False
    db_container = get_output(f"cd {q(site_dir)} && docker compose ps -q db").strip()
    if not db_container:
        print(f"{Colors.FAIL}❌ 未找到 DB 容器,无法导入。{Colors.ENDC}")
        return False
    # Resolve the actual SQL file: either the argument itself, or a db.sql
    # found inside the extracted archive.
    tmp_dir = ""
    if import_file.endswith(".tar.gz") or import_file.endswith(".zip"):
        tmp_dir = self._extract_import_to_tmp(import_file)
        db_sql_path = self._find_db_sql_in_pkg(tmp_dir) if tmp_dir else ""
    elif import_file.endswith(".sql") or import_file.endswith(".sql.gz"):
        db_sql_path = import_file
    else:
        print(f"{Colors.FAIL}❌ 不支持的数据库文件: {os.path.basename(import_file)}{Colors.ENDC}")
        return False
    # Single uniform existence check — the previous logic skipped it for
    # directly-passed .sql/.sql.gz files.
    if not db_sql_path or not os.path.exists(db_sql_path):
        print(f"{Colors.FAIL}❌ 未在包内找到 db.sql,无法导入。{Colors.ENDC}")
        if tmp_dir:
            run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
        return False
    if drop_and_recreate:
        print(f"{Colors.WARNING}⚠️ 将清空并重建数据库:{db_name}{Colors.ENDC}")
        run_cmd(
            f"docker exec -i {db_container} mysql -uroot -p{q(db_pwd)} -e "
            f"{q(f'DROP DATABASE IF EXISTS `{db_name}`; CREATE DATABASE `{db_name}` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')}",
            ignore_errors=True
        )
    print(f"{Colors.CYAN}正在导入数据库到 {db_name} ...{Colors.ENDC}")
    # Stream the dump into mysql inside the container; gunzip first for .sql.gz.
    reader = f"gunzip -c {q(db_sql_path)}" if db_sql_path.endswith(".sql.gz") else f"cat {q(db_sql_path)}"
    ok = run_cmd(
        f"{reader} | docker exec -i {db_container} mysql -uroot -p{q(db_pwd)} {q(db_name)}",
        ignore_errors=True
    )
    if tmp_dir:
        run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
    if ok:
        print(f"{Colors.GREEN}✅ 数据库导入完成。{Colors.ENDC}")
    else:
        print(f"{Colors.FAIL}❌ 数据库导入失败。{Colors.ENDC}")
    return ok
def wp_incremental_import(self, target_site_id, import_file, import_db=True, drop_db=False,
                          dirs_to_sync=None, sync_mode="merge",
                          prompt_domain=False):
    """Incremental WordPress import into an existing site.

    - Optionally imports the database (see import_db_from_file).
    - Syncs selected wp-content subdirectories (uploads/plugins/themes/...)
      from a .tar.gz/.zip archive into the target site's wp-content via rsync.
    - When prompt_domain is True and a DB was imported, offers to update
      home/siteurl afterwards.

    Args:
        target_site_id: existing site directory name under BASE_DIR.
        import_file: .sql/.sql.gz (DB only) or .tar.gz/.zip archive.
        import_db: import the database portion when True.
        drop_db: drop & recreate the DB before importing.
        dirs_to_sync: wp-content subdirs to copy; defaults to
            ["uploads", "plugins", "themes"].
        sync_mode: "merge" keeps existing files; "replace" clears each
            target directory first.
        prompt_domain: interactively offer a domain update after a DB import.

    Returns:
        False when the target is not a WordPress site; True otherwise.
    """
    if dirs_to_sync is None:
        # None sentinel instead of a mutable default argument.
        dirs_to_sync = ["uploads", "plugins", "themes"]
    site_dir = os.path.join(BASE_DIR, target_site_id)
    wp_root = os.path.join(site_dir, "www_root")
    target_wp_content = os.path.join(wp_root, "wp-content")
    if not os.path.isdir(target_wp_content):
        print(f"{Colors.FAIL}❌ 目标站点不是 WordPress 或 wp-content 不存在:{target_site_id}{Colors.ENDC}")
        return False
    if import_db:
        self.import_db_from_file(import_file, target_site_id, drop_and_recreate=drop_db)
    # File sync requires an archive; plain SQL files carry no wp-content.
    if import_file.endswith(".sql") or import_file.endswith(".sql.gz"):
        print(f"{Colors.CYAN}仅SQL文件:跳过文件夹同步。{Colors.ENDC}")
    else:
        tmp_dir = self._extract_import_to_tmp(import_file)
        if not tmp_dir:
            print(f"{Colors.WARNING}无法解包,跳过文件同步。{Colors.ENDC}")
        else:
            src_wp_content = self._find_wp_content_dir(tmp_dir)
            if not src_wp_content:
                print(f"{Colors.WARNING}包内未找到 wp-content,跳过文件同步。{Colors.ENDC}")
            else:
                print(f"{Colors.CYAN}来源 wp-content: {src_wp_content}{Colors.ENDC}")
                for d in dirs_to_sync:
                    src = os.path.join(src_wp_content, d)
                    dst = os.path.join(target_wp_content, d)
                    if not os.path.isdir(src):
                        print(f"{Colors.WARNING}跳过 {d}: 来源不存在{Colors.ENDC}")
                        continue
                    ensure_dir(dst)
                    if sync_mode == "replace":
                        print(f"{Colors.WARNING}覆盖模式:将清空目标目录 {dst}{Colors.ENDC}")
                        run_cmd(f"rm -rf {q(dst)} && mkdir -p {q(dst)}", ignore_errors=True)
                    print(f"{Colors.GREEN}同步 {d} ...{Colors.ENDC}")
                    # Trailing slashes: copy directory contents, not the dir itself.
                    run_cmd(f"rsync -a --info=stats2 {q(src)}/ {q(dst)}/", ignore_errors=True)
            run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
    self.fix_perms(target_site_id)
    self.light_init_cache_cleanup(target_site_id)
    if prompt_domain and import_db:
        self._prompt_update_domain(target_site_id)
    print(f"{Colors.GREEN}✅ WP 增量导入完成:{target_site_id}{Colors.ENDC}")
    return True
def import_full_site_from_tar(self, tar_file, target_site_id, overwrite=False,
                              import_db=True, drop_db=False, prompt_domain=False):
    """Full-site import: extract an archived site directory into
    /data/docker_sites/<target_site_id>, start it, optionally import the
    bundled db.sql, and optionally offer a home/siteurl domain update.

    Args:
        tar_file: .tar.gz/.zip archive whose first top-level entry is the
            site directory (a docker-compose.yml must be inside it).
        target_site_id: destination directory name under BASE_DIR.
        overwrite: delete an existing destination directory first.
        import_db: import <site>/db.sql when present.
        drop_db: drop & recreate the DB before importing.
        prompt_domain: interactively offer a domain update after a DB import.

    Returns:
        True on success, False on any validation/extraction failure.
    """
    if not (tar_file.endswith(".tar.gz") or tar_file.endswith(".zip")):
        print(f"{Colors.FAIL}❌ 整站导入仅支持 .tar.gz/.zip: {tar_file}{Colors.ENDC}")
        return False
    ensure_dir(BASE_DIR)
    dst_dir = os.path.join(BASE_DIR, target_site_id)
    if os.path.exists(dst_dir):
        if not overwrite:
            print(f"{Colors.FAIL}❌ 目标站点目录已存在: {dst_dir}{Colors.ENDC}")
            return False
        print(f"{Colors.WARNING}⚠️ 覆盖导入:先删除 {dst_dir}{Colors.ENDC}")
        remove_tree(dst_dir)
    tmp_dir = self._extract_import_to_tmp(tar_file)
    if not tmp_dir:
        print(f"{Colors.FAIL}❌ 解包失败。{Colors.ENDC}")
        return False
    # Identify the top-level site directory (takes the first entry only;
    # archives with multiple top-level entries are not fully handled).
    top = get_output(f"ls -1 {q(tmp_dir)} | head -n 1 || true").strip()
    if not top:
        run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
        print(f"{Colors.FAIL}❌ 无法识别包结构。{Colors.ENDC}")
        return False
    extracted_dir = os.path.join(tmp_dir, top)
    if not os.path.isdir(extracted_dir):
        run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
        print(f"{Colors.FAIL}❌ 包结构异常:未找到目录。{Colors.ENDC}")
        return False
    # Move the extracted site into place, then discard the temp dir.
    run_cmd(f"mv {q(extracted_dir)} {q(dst_dir)}", ignore_errors=True)
    run_cmd(f"rm -rf {q(tmp_dir)}", ignore_errors=True)
    # Validate the compose file. NOTE(review): has_marker appears to be
    # used as a file-existence test here — confirm the helper's semantics.
    yml = os.path.join(dst_dir, "docker-compose.yml")
    if not has_marker(yml):
        print(f"{Colors.FAIL}❌ 整站包缺少 docker-compose.yml:{dst_dir}{Colors.ENDC}")
        return False
    ensure_site_info(dst_dir)
    print(f"{Colors.CYAN}启动站点容器...{Colors.ENDC}")
    run_cmd(f"cd {q(dst_dir)} && docker compose up -d", ignore_errors=True)
    self.fix_perms(target_site_id)
    if import_db:
        db_sql = os.path.join(dst_dir, "db.sql")
        if os.path.exists(db_sql):
            self.import_db_from_file(db_sql, target_site_id, drop_and_recreate=drop_db)
        else:
            print(f"{Colors.WARNING}未发现 {db_sql},跳过数据库导入。{Colors.ENDC}")
    self.light_init_cache_cleanup(target_site_id)
    if prompt_domain and import_db:
        self._prompt_update_domain(target_site_id)
    print(f"{Colors.GREEN}✅ 整站导入完成:{target_site_id}{Colors.ENDC}")
    return True
def _prompt_update_domain(self, site_id):
    """Interactively offer to point the imported WordPress at a new domain."""
    if input("是否更新 WordPress 新域名(siteurl/home)?(Y/n): ").strip().lower() == "n":
        return
    domain = input("请输入新域名(例如 www.new.com,回车跳过): ").strip()
    if not domain:
        return
    pick = input("协议选择:1) http 2) https [默认1]: ").strip()
    # Anything other than "2" (including blank) means plain http.
    self._update_wp_domain(site_id, domain, scheme="https" if pick == "2" else "http")
def import_menu(self):
    """Top-level handler for menu option 19: import external backups
    uploaded into IMPORT_DIR.

    Three modes:
      1) DB-only import into an existing site,
      2) WordPress incremental import (DB + selected wp-content subdirs),
      3) full-site import (extract a complete site directory and start it —
         the only mode that does not require a pre-existing site).
    """
    print(f"\n{Colors.HEADER}--- 导入外部备份 (imports) ---{Colors.ENDC}")
    ensure_dir(IMPORT_DIR)
    files = list_import_files()
    if not files:
        print(f"{Colors.WARNING}imports 目录为空:{IMPORT_DIR}{Colors.ENDC}")
        print("请上传到该目录:.tar.gz / .zip / .sql / .sql.gz")
        pause()
        return
    print("请选择导入类型:")
    print("1) 只导入数据库(导入到现有站点)")
    print("2) WordPress 增量导入(DB + uploads/plugins/themes 等)")
    print("3) 导入整站(解压站点目录+启动,可选导 db.sql)")
    mode = input("选择(1/2/3,回车取消): ").strip()
    if not mode:
        return
    if mode not in {"1", "2", "3"}:
        print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
        pause()
        return
    print(f"\n{Colors.CYAN}imports 文件:{Colors.ENDC}")
    for i, f in enumerate(files, 1):
        print(f"{i:<4} {os.path.basename(f)}")
    fidx = safe_int(input("请选择文件编号(回车取消): ").strip(), -1) - 1
    if fidx < 0 or fidx >= len(files):
        return
    import_file = files[fidx]
    # Target site selection (mode 3 accepts a brand-new site id instead).
    sites = self.get_site_list(include_pending=True, include_failed=True)
    if mode in {"1", "2"}:
        # Modes 1/2 require an already-installed site to import into.
        if not sites:
            print(f"{Colors.WARNING}暂无目标站点,请先新建站点。{Colors.ENDC}")
            pause()
            return
        print(f"\n{Colors.CYAN}目标站点列表:{Colors.ENDC}")
        for i, s in enumerate(sites, 1):
            print(f"{i:<4} {s}")
        sidx = safe_int(input("请选择目标站点编号(回车取消): ").strip(), -1) - 1
        if sidx < 0 or sidx >= len(sites):
            return
        target_site = sites[sidx]
        if mode == "1":
            drop = input("导入前是否清空目标库并重建?(y/N): ").strip().lower() == "y"
            ok = self.import_db_from_file(import_file, target_site, drop_and_recreate=drop)
            if ok:
                self._prompt_update_domain(target_site)
                self.light_init_cache_cleanup(target_site)
            pause()
            return
        if mode == "2":
            print("\n选择要同步的 WordPress 目录(默认 123=uploads/plugins/themes):")
            print("1) uploads")
            print("2) plugins")
            print("3) themes")
            print("4) mu-plugins")
            print("5) languages")
            pick = input("输入编号组合(如 123,回车默认 123): ").strip() or "123"
            mapping = {"1": "uploads", "2": "plugins", "3": "themes", "4": "mu-plugins", "5": "languages"}
            # Build the ordered, de-duplicated directory list from the digits typed.
            dirs = []
            for ch in pick:
                if ch in mapping and mapping[ch] not in dirs:
                    dirs.append(mapping[ch])
            if not dirs:
                dirs = ["uploads", "plugins", "themes"]
            sync_mode = input("文件同步模式:1) 合并(推荐) 2) 覆盖(先清空目标目录) [默认1]: ").strip()
            sync_mode = "replace" if sync_mode == "2" else "merge"
            import_db = input("是否导入数据库?(Y/n): ").strip().lower() != "n"
            drop_db = False
            if import_db:
                drop_db = input("导入前是否清空目标库并重建?(y/N): ").strip().lower() == "y"
            self.wp_incremental_import(
                target_site_id=target_site,
                import_file=import_file,
                import_db=import_db,
                drop_db=drop_db,
                dirs_to_sync=dirs,
                sync_mode=sync_mode,
                prompt_domain=True
            )
            pause()
            return
    # mode == "3"
    if not (import_file.endswith(".tar.gz") or import_file.endswith(".zip")):
        print(f"{Colors.WARNING}整站导入建议使用 .tar.gz/.zip。{Colors.ENDC}")
        pause()
        return
    new_id = normalize_site_id(input("请输入导入后的站点ID(例如 newsite.cn,回车取消): ").strip())
    if not new_id:
        return
    overwrite = input("若同名目录存在,是否覆盖导入(会删除旧目录)?(y/N): ").strip().lower() == "y"
    import_db = input("是否尝试导入包内 db.sql?(Y/n): ").strip().lower() != "n"
    drop_db = False
    if import_db:
        drop_db = input("导入前是否清空目标库并重建?(y/N): ").strip().lower() == "y"
    self.import_full_site_from_tar(
        tar_file=import_file,
        target_site_id=new_id,
        overwrite=overwrite,
        import_db=import_db,
        drop_db=drop_db,
        prompt_domain=True
    )
    pause()
# ---------- DNS 修复 ----------
def fix_site_dns(self):
    """Ensure each site's compose file carries DEFAULT_DNS for php_fpm/nginx,
    de-duplicating existing dns entries first and recreating changed sites.
    """
    print(f"\n{Colors.HEADER}--- 站点 DNS 修复 ---{Colors.ENDC}")
    sites = self.get_all_site_dirs()
    if not sites:
        print("暂无站点。")
        pause()
        return
    updated = 0
    for site in sites:
        site_dir = os.path.join(BASE_DIR, site)
        compose_path = os.path.join(site_dir, "docker-compose.yml")
        if not has_marker(compose_path):
            continue
        dirty = clean_duplicate_dns(compose_path, ["php_fpm", "nginx"])
        # add_dns_to_compose must always run, even when cleanup already
        # marked the file dirty — hence "or" with the call first.
        dirty = add_dns_to_compose(compose_path, ["php_fpm", "nginx"], DEFAULT_DNS) or dirty
        if dirty:
            updated += 1
            run_cmd(f"cd {q(site_dir)} && docker compose up -d", ignore_errors=True)
    if updated:
        print(f"{Colors.GREEN}✅ 已更新 {updated} 个站点 DNS。{Colors.ENDC}")
    else:
        print(f"{Colors.CYAN}无需更新,站点 DNS 已配置。{Colors.ENDC}")
    pause()
def check_and_fix_site_dns(self):
    """Verify DNS resolution inside each site's containers; for failing
    sites, write DEFAULT_DNS into the compose file and recreate, then
    report fixed/ok/failed counts.
    """
    print(f"\n{Colors.HEADER}--- 自动检测并修复 DNS ---{Colors.ENDC}")
    sites = self.get_all_site_dirs()
    if not sites:
        print("暂无站点。")
        pause()
        return
    fixed = 0
    ok = 0
    failed = 0
    for site in sites:
        site_dir = os.path.join(BASE_DIR, site)
        compose_path = os.path.join(site_dir, "docker-compose.yml")
        if not has_marker(compose_path):
            continue
        # Deduplicate first; a cleanup implies the compose file changed,
        # so recreate before probing DNS.
        if clean_duplicate_dns(compose_path, ["php_fpm", "nginx"]):
            run_cmd(f"cd {q(site_dir)} && docker compose up -d", ignore_errors=True)
        if check_dns_in_container(site_dir):
            ok += 1
            continue
        # DNS probe failed: inject DEFAULT_DNS and recreate.
        if add_dns_to_compose(compose_path, ["php_fpm", "nginx"], DEFAULT_DNS):
            run_cmd(f"cd {q(site_dir)} && docker compose up -d", ignore_errors=True)
        # Re-probe after the fix attempt to classify the outcome.
        if check_dns_in_container(site_dir):
            fixed += 1
        else:
            failed += 1
    print(f"{Colors.GREEN}✅ 已修复: {fixed}{Colors.ENDC}  {Colors.CYAN}正常: {ok}{Colors.ENDC}  {Colors.WARNING}失败: {failed}{Colors.ENDC}")
    pause()
# ---------- 镜像源设置(仅显示策略,不再让你选一堆不该prefix的源) ----------
def choose_mirror(self):
    """Set the default starting mirror strategy for either the initial
    install or new-site image pulls (module-level keys).
    """
    global INIT_MIRROR_KEY, SITE_MIRROR_KEY
    print(f"\n{Colors.HEADER}--- 镜像策略设置 ---{Colors.ENDC}")
    print("说明:站点/核心拉取策略固定为:DaoCloud(prefix) -> DockerProxy(prefix) -> no-prefix(daemon mirrors)")
    print("你可设置“默认起始策略”,假如你想先从 no-prefix 开始。")
    print("1) 初始化安装默认起始策略")
    print("2) 新建站点默认起始策略")
    scope = input("请选择类型(回车取消): ").strip()
    if not scope:
        return
    if scope not in {"1","2"}:
        print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
        pause()
        return
    # The key currently in effect for the chosen scope, used to tag the listing.
    active_key = INIT_MIRROR_KEY if scope == "1" else SITE_MIRROR_KEY
    print("\n可选起始策略:")
    for key, mirror, label in PREFIX_MIRRORS:
        suffix = " (当前)" if key == active_key else ""
        display = mirror if mirror else "no-prefix"
        print(f"{key}) {label}: {display}{suffix}")
    choice = input("请选择编号(回车取消): ").strip()
    if not choice:
        return
    if choice not in {entry[0] for entry in PREFIX_MIRRORS}:
        print(f"{Colors.WARNING}编号无效。{Colors.ENDC}")
        pause()
        return
    if scope == "1":
        INIT_MIRROR_KEY = choice
    else:
        SITE_MIRROR_KEY = choice
    print(f"{Colors.GREEN}✅ 已更新默认起始策略。{Colors.ENDC}")
    pause()
# ---------- 主菜单 ----------
def main_menu(self):
    """Interactive main loop: render the status header and menu, then
    dispatch the chosen action. Runs until option 0 calls sys.exit.
    """
    while True:
        os.system("clear")
        # Core-service status: a non-empty `docker ps -q` filter result means running.
        npm_status = f"{Colors.GREEN}ON{Colors.ENDC}" if get_output("docker ps -q -f name=proxy-app-1") else f"{Colors.FAIL}OFF{Colors.ENDC}"
        frp_status = f"{Colors.GREEN}ON{Colors.ENDC}" if get_output("docker ps -q -f name=frpc-frpc-1") else f"{Colors.FAIL}OFF{Colors.ENDC}"
        print(f"{Colors.CYAN}===================================================={Colors.ENDC}")
        print(f"{Colors.BOLD}   🚀 Docker 全栈管理系统 (Python 终极版 V6.0){Colors.ENDC}")
        print(f"{Colors.CYAN}===================================================={Colors.ENDC}")
        print(f"网关(NPM): {npm_status}   内网穿透(FRP): {frp_status}")
        print(f"NPM 管理地址: http://{get_primary_ip()}:81")
        print("-" * 60)
        print("1. 🛠  初始化环境 (首次安装)")
        print("2. ➕ 新建站点 (WP/PHP) + 新站轻量清缓存")
        print("3. 🧾 站点列表 (美化) + 编号查看详情")
        print("4. 💾 全站备份 (backups)")
        print("5. ♻️  从 backups 一键恢复 (整站)")
        print("6. 🗑  销毁站点")
        print("7. 🔄 重启核心服务 (NPM/FRP)")
        print("8. 🔧 修复权限 (UID 82/101)")
        print("9. 📊 资源监控")
        print("10. 📦 下载资源到 cache")
        print("11. 🚑 访问修复(急救)")
        print("12. 🚀 备用机上线(一键)")
        print("13. ⚡ Redis 优化 (写入 wp-config)")
        print("14. 🧹 清理缓存(手动)")
        print("15. ⏱  批量调整缓存时间")
        print("16. 🪞 镜像策略设置(起始策略)")
        print("17. 🧠 站点 DNS 修复(写入dns)")
        print("18. 🧪 自动检测并修复 DNS")
        print("19. 📥 外部导入 (imports):DB / WP增量 / 整站 + 新域名提示")
        print("0. 退出")
        print("-" * 60)
        choice = input("请选择: ").strip()
        if choice == "1":
            # First-time setup runs the full provisioning pipeline in order.
            self.install_dependencies()
            self.clean_ports()
            self.install_docker()
            self.setup_frp_config()
            self.deploy_core_services()
            print(f"\nNPM 管理地址: http://{get_primary_ip()}:81")
            print("提示:如访问域名看到默认页,请先在 NPM 添加 Proxy Host 指向对应站点 upstream。")
            pause("初始化完成,按回车继续...")
        elif choice == "2":
            self.add_site()
        elif choice == "3":
            self.list_sites_menu()
        elif choice == "4":
            self.backup_sites()
        elif choice == "5":
            self.restore_site_from_backup()
        elif choice == "6":
            self.delete_site()
        elif choice == "7":
            run_cmd("docker restart proxy-app-1 frpc-frpc-1", ignore_errors=True)
            print("核心服务已重启。")
            time.sleep(1)
        elif choice == "8":
            self.fix_perms()
            print("✅ 权限修复完成。")
            time.sleep(1)
        elif choice == "9":
            self.monitor()
        elif choice == "10":
            self.download_cache()
        elif choice == "11":
            self.deep_fix()
        elif choice == "12":
            self.backup_online()
        elif choice == "13":
            self.redis_optimize()
        elif choice == "14":
            self.cache_menu()
        elif choice == "15":
            self.cache_ttl_menu()
        elif choice == "16":
            self.choose_mirror()
        elif choice == "17":
            self.fix_site_dns()
        elif choice == "18":
            self.check_and_fix_site_dns()
        elif choice == "19":
            self.import_menu()
        elif choice == "0":
            sys.exit(0)
        # Any other input simply redraws the menu.
if __name__ == "__main__":
    # Launch the interactive management menu when run as a script.
    DockerOps().main_menu()
# 原创文章,作者:开心电脑网,如若转载,请注明出处。