From 0fb956fab10a9b24087a00dd43c0d1862cacb6e2 Mon Sep 17 00:00:00 2001
From: liu <liujiatong112@163.com>
Date: Mon, 12 Aug 2024 15:56:50 +0800
Subject: [PATCH] Add deployment documentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docker/fu-hsi-web/README.md                   | 11 +++++++++
 docker/ollama/README.md                       | 52 ++++++++++++++++++++++++++
 .../controller/ModelCaseController.java       |  1 +
 3 files changed, 64 insertions(+)
 create mode 100644 docker/ollama/README.md

diff --git a/docker/fu-hsi-web/README.md b/docker/fu-hsi-web/README.md
index 6bdadea..b8d8de7 100644
--- a/docker/fu-hsi-web/README.md
+++ b/docker/fu-hsi-web/README.md
@@ -1,3 +1,14 @@
+# Directory layout
+- /data/fu-hsi : the project root directory; everything the project uses lives under this path
+
 Build steps
 - Build the image from the Dockerfile: docker build -t fu-hsi-web:1.0.0 .
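+
+A minimal sketch of running the built image (the host/container ports are assumptions; adjust -p to whatever the Dockerfile actually exposes):
+
+~~~
+# Run the freshly built image in the background, mapping host port 80
+# to the port the web app is assumed to listen on inside the container.
+docker run -d --name fu-hsi-web -p 80:80 fu-hsi-web:1.0.0
+~~~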
 
diff --git a/docker/ollama/README.md b/docker/ollama/README.md
new file mode 100644
index 0000000..6a89dbb
--- /dev/null
+++ b/docker/ollama/README.md
@@ -0,0 +1,52 @@
+# Directory layout
+- /data/ollama : the project root directory; everything the project uses lives under this path
+
+# Build steps with Docker
+
+- First, install the NVIDIA Container Toolkit (a GPU smoke test follows these steps)
+    1. Configure the package repo: curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
+    2. Install the toolkit: sudo yum install -y nvidia-container-toolkit
+    3. Configure Docker to use it: sudo nvidia-ctk runtime configure --runtime=docker
+    4. Restart Docker: systemctl restart docker
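+
+    To confirm Docker can now reach the GPUs, a quick smoke test (a sketch; the CUDA image tag is an assumption, any CUDA base image will do):
+    ~~~
+    # nvidia-smi inside a throwaway container should list the host GPUs
+    sudo docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
+    ~~~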
+- Build steps
+  1. Get the ollama image
+     - Pull online: docker pull ollama/ollama:0.3.5
+     - Load locally: docker load -i ollama-0.3.5.tar
+  2. Run the container: sudo docker run -d --gpus=all -e NVIDIA_VISIBLE_DEVICES=all -v /data/ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:0.3.5
+  3. Copy the model file into the local directory /data/ollama/gguf
+  4. Create the file: touch Modelfile
+  5. Edit Modelfile and paste in the following content
+    ~~~
+    FROM ./replace-with-the-model-to-deploy.gguf
+    
+    TEMPLATE """
+    {{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}
+    {{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}
+    <|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>
+    """
+    ~~~
+  6. Enter the container: docker exec -it ollama bash
+  7. Change to the model directory: cd /root/.ollama/gguf
+  8. Create the model: ollama create llama3-chinese:8b -f Modelfile (replace llama3-chinese:8b with whatever name you prefer; a verification sketch follows this list)
+
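+# Verifying the deployment
+
+Once the model is created, a quick check from the host (a sketch; llama3-chinese:8b is the example name from step 8, and the prompt is arbitrary):
+
+~~~
+# List registered models; the newly created name should appear.
+docker exec ollama ollama list
+
+# Send a one-off generation request to the Ollama HTTP API on the mapped port.
+curl http://localhost:11434/api/generate -d '{
+  "model": "llama3-chinese:8b",
+  "prompt": "你好",
+  "stream": false
+}'
+~~~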
diff --git a/src/main/java/com/supervision/police/controller/ModelCaseController.java b/src/main/java/com/supervision/police/controller/ModelCaseController.java
index 8852904..7f58443 100644
--- a/src/main/java/com/supervision/police/controller/ModelCaseController.java
+++ b/src/main/java/com/supervision/police/controller/ModelCaseController.java
@@ -1,5 +1,6 @@
 package com.supervision.police.controller;
 
+import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
 import com.baomidou.mybatisplus.core.metadata.IPage;
 import com.supervision.common.domain.R;
 import com.supervision.police.domain.CasePerson;