#!/bin/bash

# vLLM Docker one-click installation script
# Purpose: automatically install Docker, the NVIDIA driver and the NVIDIA Container Toolkit, then deploy a vLLM service

set -e

# Make sure the script is running as root
if [ "$(id -u)" -ne 0 ]; then
    echo "Please run this script as root or via sudo"
    exit 1
fi

# Ask how many GPUs to use
read -p "Enter the number of GPUs to use (e.g. 1): " GPU_COUNT
if ! [[ "$GPU_COUNT" =~ ^[1-9][0-9]*$ ]]; then
    echo "Error: please enter a valid number"
    exit 1
fi

# Ask which model to serve
read -p "Enter the model to download (e.g. meta-llama/Llama-2-7b-chat-hf): " MODEL_NAME
if [ -z "$MODEL_NAME" ]; then
    echo "Error: the model name must not be empty"
    exit 1
fi
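
# Optional (sketch): gated models such as the meta-llama example above require a
# Hugging Face access token. Press Enter to skip if the model is public.
read -p "Enter a Hugging Face access token if the model is gated (optional): " HF_TOKEN
HF_TOKEN_OPTS=()
if [ -n "$HF_TOKEN" ]; then
    # Passed to the container below so the model download can authenticate
    HF_TOKEN_OPTS=(-e "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN")
fi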

# Check for an NVIDIA GPU
echo "Checking for an NVIDIA GPU..."
if ! lspci | grep -i nvidia > /dev/null; then
    echo "No NVIDIA GPU detected!"
    exit 1
fi

# Install base dependencies (lsb-release and gnupg are needed by the repository setup steps below)
echo "Installing base dependencies..."
apt-get update
apt-get install -y curl apt-transport-https ca-certificates software-properties-common lsb-release gnupg

# Install the NVIDIA driver
echo "Checking for the NVIDIA driver..."
if ! nvidia-smi &> /dev/null; then
    echo "Installing the NVIDIA driver..."
    add-apt-repository -y ppa:graphics-drivers/ppa
    apt-get update
    apt-get install -y ubuntu-drivers-common
    ubuntu-drivers autoinstall
    echo "NVIDIA driver installed; a reboot is required."
    echo "Please reboot and run this script again to continue the installation."
    exit 0
fi

# Install Docker
echo "Checking for Docker..."
if ! command -v docker &> /dev/null; then
    echo "Installing Docker..."
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
    add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    apt-get update
    apt-get install -y docker-ce docker-ce-cli containerd.io
    systemctl enable --now docker
fi
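
# Sanity check (sketch): make sure the Docker daemon is actually reachable before continuing
if ! docker info > /dev/null 2>&1; then
    echo "Error: Docker is installed but the daemon does not appear to be running"
    exit 1
fi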

# Install the NVIDIA Container Toolkit
echo "Checking for the NVIDIA Container Toolkit..."
if ! dpkg -s nvidia-container-toolkit &> /dev/null; then
    echo "Installing the NVIDIA Container Toolkit..."
    distribution=$(. /etc/os-release; echo "$ID$VERSION_ID")
    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add -
    curl -s -L "https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list" | tee /etc/apt/sources.list.d/nvidia-docker.list
    apt-get update
    apt-get install -y nvidia-container-toolkit
    systemctl restart docker
fi
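
# Optional smoke test (sketch): run nvidia-smi inside a throwaway CUDA container to
# confirm Docker can see the GPU. The nvidia/cuda image tag below is an assumption;
# pick one that matches your installed driver if the pull fails.
echo "Verifying GPU access from inside a container..."
docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi \
    || echo "Warning: the GPU was not visible inside the test container"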

# Deploy the vLLM service
# The official vLLM image (vllm/vllm-openai on Docker Hub) starts the OpenAI-compatible
# API server and accepts the serving flags passed after the image name below.
echo "Deploying the vLLM container..."
docker run -d --gpus all \
    --shm-size=1g \
    --ulimit memlock=-1 \
    --ulimit stack=67108864 \
    -e NCCL_IGNORE_DISABLED_P2P=1 \
    "${HF_TOKEN_OPTS[@]}" \
    -p 8000:8000 \
    --name vllm_service \
    --restart always \
    -v "$HOME/.cache/huggingface:/root/.cache/huggingface" \
    vllm/vllm-openai:latest \
    --model "$MODEL_NAME" \
    --tensor-parallel-size "$GPU_COUNT" \
    --host 0.0.0.0 \
    --port 8000
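
# Optionally wait for the API to come up (sketch): poll the OpenAI-compatible
# /v1/models endpoint. The first start may spend a long time downloading the model,
# so the timeout below is only a rough guess.
echo "Waiting for the vLLM API to become ready (this can take a while on first start)..."
for _ in $(seq 1 60); do
    if curl -sf http://localhost:8000/v1/models > /dev/null; then
        echo "vLLM API is responding."
        break
    fi
    sleep 10
done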

echo "The vLLM service has been deployed!"
echo "API address: http://localhost:8000"
echo "You can follow the logs with: docker logs -f vllm_service"
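
# Example request against the OpenAI-compatible API (a sketch; adjust the prompt and
# parameters to taste):
cat <<EOF
Try a test request, for example:
  curl http://localhost:8000/v1/completions \\
    -H "Content-Type: application/json" \\
    -d '{"model": "$MODEL_NAME", "prompt": "Hello, my name is", "max_tokens": 32}'
EOF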