1. Download Ollama
Online installation
On Linux, run: curl -fsSL https://ollama.com/install.sh | sh. Because this downloads Ollama from an external network, the connection is often unstable and the download can easily time out.
Offline installation
Step 1: download an offline Ollama release. Run lscpu on the Linux server to check the CPU architecture, then download the matching offline build from https://github.com/ollama/ollama/releases (a download sketch follows Step 2).
Step 2: download install.sh and modify it. The script is available at https://ollama.com/install.sh
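As a rough sketch of Step 1: the asset name ollama-linux-amd64 matches the binary used in the modified install.sh below, ollama-linux-arm64 is the assumed name for ARM servers, and OLLAMA_VERSION is a placeholder for whichever release tag you pick on the releases page.

# Run this on a machine with internet access, then copy the file to the server.
OLLAMA_VERSION=v0.x.y                 # placeholder: substitute a real release tag
ARCH=$(uname -m)                      # lscpu reports the same architecture field
case "$ARCH" in
    x86_64)        ASSET=ollama-linux-amd64 ;;
    aarch64|arm64) ASSET=ollama-linux-arm64 ;;
    *) echo "unsupported architecture: $ARCH"; exit 1 ;;
esac
curl -L -o "$ASSET" "https://github.com/ollama/ollama/releases/download/$OLLAMA_VERSION/$ASSET"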
Modification 1: comment out the command that downloads ollama over the network.
Modification 2: change the install command so that it installs the offline ollama binary, which should be placed in the same directory as install.sh.
The final modified install.sh:
#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.

set -eu

status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; exit 1; }
warning() { echo "WARNING: $*"; }

TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT

available() { command -v $1 >/dev/null; }
require() {
    local MISSING=''
    for TOOL in $*; do
        if ! available $TOOL; then
            MISSING="$MISSING $TOOL"
        fi
    done

    echo $MISSING
}

[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'

ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64|arm64) ARCH="arm64" ;;
    *) error "Unsupported architecture: $ARCH" ;;
esac

IS_WSL2=false

KERN=$(uname -r)
case "$KERN" in
    *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
    *icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro name> 2'" ;;
    *) ;;
esac

VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"

SUDO=
if [ "$(id -u)" -ne 0 ]; then
    # Running as root, no need for sudo
    if ! available sudo; then
        error "This script requires superuser permissions. Please re-run as root."
    fi

    SUDO="sudo"
fi

NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
    status "ERROR: The following tools are required but missing:"
    for NEED in $NEEDS; do
        echo "  - $NEED"
    done
    exit 1
fi

status "Downloading ollama..."
# curl --fail --show-error --location --progress-bar -o $TEMP_DIR/ollama "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"

for BINDIR in /usr/local/bin /usr/bin /bin; do
    echo $PATH | grep -q $BINDIR && break || continue
done

status "Installing ollama to $BINDIR..."
$SUDO install -o0 -g0 -m755 -d $BINDIR
# $SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $BINDIR/ollama
$SUDO install -o0 -g0 -m755 ./ollama-linux-amd64 $BINDIR/ollama

install_success() {
    status 'The Ollama API is now available at 127.0.0.1:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT

# Everything from this point onwards is optional.

configure_systemd() {
    if ! id ollama >/dev/null 2>&1; then
        status "Creating ollama user..."
        $SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
    fi
    if getent group render >/dev/null 2>&1; then
        status "Adding ollama user to render group..."
        $SUDO usermod -a -G render ollama
    fi
    if getent group video >/dev/null 2>&1; then
        status "Adding ollama user to video group..."
        $SUDO usermod -a -G video ollama
    fi

    status "Adding current user to ollama group..."
    $SUDO usermod -a -G ollama $(whoami)

    status "Creating ollama systemd service..."
    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"

[Install]
WantedBy=default.target
EOF
    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
    case $SYSTEMCTL_RUNNING in
        running|degraded)
            status "Enabling and starting ollama service..."
            $SUDO systemctl daemon-reload
            $SUDO systemctl enable ollama

            start_service() { $SUDO systemctl restart ollama; }
            trap start_service EXIT
            ;;
    esac
}

if available systemctl; then
    configure_systemd
fi

# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
    if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
        status "Nvidia GPU detected."
    fi
    install_success
    exit 0
fi

# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
    warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
    exit 0
fi

check_gpu() {
    # Look for devices based on vendor ID for NVIDIA and AMD
    case $1 in
        lspci)
            case $2 in
                nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
                amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
            esac ;;
        lshw)
            case $2 in
                nvidia) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
                amdgpu) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[1002\]' || return 1 ;;
            esac ;;
        nvidia-smi) available nvidia-smi || return 1 ;;
    esac
}

if check_gpu nvidia-smi; then
    status "NVIDIA GPU installed."
    exit 0
fi

if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
    install_success
    warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
    exit 0
fi

if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
    # Look for pre-existing ROCm v6 before downloading the dependencies
    for search in "${HIP_PATH:-}" "${ROCM_PATH:-}" "/opt/rocm" "/usr/lib64"; do
        if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then
            status "Compatible AMD GPU ROCm library detected at ${search}"
            install_success
            exit 0
        fi
    done

    status "Downloading AMD GPU dependencies..."
    $SUDO rm -rf /usr/share/ollama/lib
    $SUDO chmod o+x /usr/share/ollama
    $SUDO install -o ollama -g ollama -m 755 -d /usr/share/ollama/lib/rocm
    curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \
        | $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm
    install_success
    status "AMD GPU ready."
    exit 0
fi

# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
    status 'Installing NVIDIA repository...'
    case $PACKAGE_MANAGER in
        yum)
            $SUDO $PACKAGE_MANAGER -y install yum-utils
            $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
        dnf)
            $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
    esac

    case $1 in
        rhel)
            status 'Installing EPEL repository...'
            # EPEL is required for third-party dependencies such as dkms and libvdpau
            $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
            ;;
    esac

    status 'Installing CUDA driver...'

    if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
        $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
    fi

    $SUDO $PACKAGE_MANAGER -y install cuda-drivers
}

# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
    status 'Installing NVIDIA repository...'
    curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-keyring_1.1-1_all.deb

    case $1 in
        debian)
            status 'Enabling contrib sources...'
            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
            if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
                $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
            fi
            ;;
    esac

    status 'Installing CUDA driver...'
    $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
    $SUDO apt-get update

    [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
    DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}

if [ ! -f "/etc/os-release" ]; then
    error "Unknown distribution. Skipping CUDA installation."
fi

. /etc/os-release

OS_NAME=$ID
OS_VERSION=$VERSION_ID

PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
    if available $PACKAGE_MANAGER; then
        break
    fi
done

if [ -z "$PACKAGE_MANAGER" ]; then
    error "Unknown package manager. Skipping CUDA installation."
fi

if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
    case $OS_NAME in
        centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
        rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
        fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37';;
        amzn) install_cuda_driver_yum 'fedora' '37' ;;
        debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
        ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
        *) exit ;;
    esac
fi

if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
    KERNEL_RELEASE="$(uname -r)"
    case $OS_NAME in
        rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
        centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
        fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
        debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
        *) exit ;;
    esac

    NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
    if [ -n "$NVIDIA_CUDA_VERSION" ]; then
        $SUDO dkms install $NVIDIA_CUDA_VERSION
    fi

    if lsmod | grep -q nouveau; then
        status 'Reboot to complete NVIDIA CUDA driver install.'
        exit 0
    fi

    $SUDO modprobe nvidia
    $SUDO modprobe nvidia_uvm
fi

# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if command -v nvidia-persistenced > /dev/null 2>&1; then
    $SUDO touch /etc/modules-load.d/nvidia.conf
    MODULES="nvidia nvidia-uvm"
    for MODULE in $MODULES; do
        if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
            echo "$MODULE" | sudo tee -a /etc/modules-load.d/nvidia.conf > /dev/null
        fi
    done
fi

status "NVIDIA GPU ready."
install_success

When this output appears, Ollama has been installed successfully.
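Optionally, a quick check once the success message appears (these commands assume the systemd service was created by the script as shown above):

ollama --version                  # the binary installed to $BINDIR
systemctl status ollama.service   # the service created by install.sh
curl http://127.0.0.1:11434       # the API address reported by the script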
2. Start Nginx and Deploy the Vue Frontend
Start nginx: systemctl start nginx.service
Check nginx status: systemctl status nginx.service
Stop nginx: systemctl stop nginx.service
Edit the site configuration file, since that is where the http server settings are written. The nginx site configuration directory is /etc/nginx/sites-available. Enter that directory and edit the default file: vim default
location / {
    index index.html index.htm index.nginx-debian.html;
    # First attempt to serve request as file, then
    # as directory, then fall back to displaying a 404.
    try_files $uri $uri/ @router;
}

location @router {
    rewrite ^.*$ /index.html last;
}

If your frontend is built with Vue and uses vue-router, this configuration is required; without it, router navigation will return 404 errors.
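For context, here is a minimal sketch of how this fits into the full server block in the default file. The listen port, server_name, and root path are assumptions for illustration; point root at the directory that holds your built Vue files, and reload or restart nginx afterwards so the change takes effect.

server {
    listen 80 default_server;
    server_name _;

    # Assumed location of the built Vue dist output.
    root /var/www/html;
    index index.html index.htm index.nginx-debian.html;

    location / {
        # Serve the file or directory if it exists, otherwise hand off to @router.
        try_files $uri $uri/ @router;
    }

    location @router {
        # Send every unmatched path back to index.html so vue-router can resolve it.
        rewrite ^.*$ /index.html last;
    }
}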
3. Run the Python Scripts
Go to the directory that holds the Python scripts and run python xxx.py. When a script starts, the system may report that some modules are missing; install them as prompted with pip install module_name. Be aware that some reported module names do not match the package name. For example, ModuleNotFoundError: No module named docx does not mean you should install a docx package; install python-docx instead. Once all required libraries are installed, go to the directory containing the scripts and start the entry file.
Foreground (short-lived) run: python ai_analysis.py
Persistent background run: nohup python ai_analysis.py > /opt/app/llm_python/ai_analysis_project/log 2>&1 &
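A quick sanity check after installing the libraries and starting the script: the import name docx is provided by the python-docx package, and the log path matches the nohup command above.

# Confirm that python-docx is importable under the module name "docx".
pip install python-docx
python -c "import docx; print('python-docx OK')"

# After the nohup start, confirm the process is alive and watch its log.
ps -ef | grep ai_analysis.py
tail -f /opt/app/llm_python/ai_analysis_project/log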
4. Libraries Currently Required by the Project
Libraries needed for MiniCPM (the environment used in the official tests):
Pillow==10.1.0
torch==2.1.2 (1.13.0 was the originally installed version)
torchvision==0.16.2 (0.17.1 was the originally installed version)
transformers==4.40.0
sentencepiece==0.1.99
accelerate==0.30.1
bitsandbytes==0.43.1
Libraries needed for the AI analysis: langchain, langchain_community
Libraries needed for document analysis: pandasai, python-docx, fitz, faiss-gpu (conda install faiss-gpu -c pytorch)
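A consolidated install sketch based on the lists above. The version pins come from the MiniCPM environment listed earlier; substituting PyMuPDF for fitz is an assumption (PyMuPDF is the package that provides the fitz module), so adjust to your environment.

pip install Pillow==10.1.0 torch==2.1.2 torchvision==0.16.2 \
    transformers==4.40.0 sentencepiece==0.1.99 accelerate==0.30.1 bitsandbytes==0.43.1
pip install langchain langchain_community
pip install pandasai python-docx PyMuPDF    # "import fitz" comes from PyMuPDF (assumed)
conda install faiss-gpu -c pytorch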