Merge remote-tracking branch 'origin/dev_1.0.0' into dev_1.0.0

# Conflicts:
#	src/main/java/com/supervision/police/service/impl/ModelCaseServiceImpl.java

@ -0,0 +1,30 @@
# MySQL configuration
MYSQL_PASSWORD=12345678
MYSQL_PORT=5415
# MinIO configuration
MINIO_CONSOLE_PORT=9001
MINIO_PORT=9000
MINIO_USER=admin
MINIO_PASSWORD=12345678
# Neo4j configuration
NEO4J_PORT_HTTP=7474
NEO4J_PORT_BOLT=7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=123456
# Fu-Hsi backend service configuration
WEB_HTTP_PORT=9380
SPRING_PROFILES_ACTIVE=bridge
OLLAMA_URL=http://192.168.10.70:11434
OLLAMA_MODEL=llama3-chinese:8b
LANGCHAIN_CHAT_URL=http://113.128.242.110:7861
# Nginx configuration
NGINX_HTTP_PORT=80
FU_HSI_SERVERS=fu-hsi-web:8097
TIMEZONE='Asia/Shanghai'
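These values are substituted into the compose files at startup. As a quick sanity check, run from the deploy directory next to this .env (a minimal sketch):

```shell
# Print the merged compose configuration with the .env values substituted;
# a missing or malformed variable surfaces here before any container is created.
docker compose config
```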

@ -0,0 +1,73 @@
## 🎬 Quick Start
### 📝 Prerequisites
- CPU >= 16 cores
- RAM >= 32 GB
- Disk >= 500 GB
- Docker >= 24.0.6 & Docker Compose >= v2.5.0
### Installation Package Layout
| File / Directory | Description |
|---|---|
| fu-hsi-nginx/ | Static files served by nginx |
| fu-hsi-web/ | Backend service program |
| .env | Environment configuration for the application |
| docker-compose.yml | Top-level docker-compose file (includes the base file and defines fu-hsi-web and fu-hsi-nginx) |
| docker-compose-base.yml | Base docker-compose file (MySQL, MinIO, Neo4j) |
### 🚀 Starting the Services
1. Load the images
```shell
# Enter the deploy directory
cd ./deploy
# Load the images
docker load -i fu-shi-install.1.0.0.tar.gz
```
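To confirm the images were imported, a quick check (the names match the tags referenced later in docker-compose.yml):

```shell
# List the images that docker load should have imported
docker images | grep -E 'fu-hsi-web|fu-hsi-nginx|mysql|minio|neo4j'
```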
2. Edit the configuration file
```shell
# Edit the configuration file
$ vi .env
# Adjust the following values for your environment
OLLAMA_URL: address of the Ollama service
OLLAMA_MODEL: model name
LANGCHAIN_CHAT_URL: address of the langchain-chat service
# All other settings can keep their default values
```
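When the deployment is scripted rather than edited by hand, the same values can be set non-interactively; a sketch in which the addresses are placeholders, not real endpoints:

```shell
# Point the backend at the Ollama and langchain-chat services for this environment
sed -i 's|^OLLAMA_URL=.*|OLLAMA_URL=http://10.0.0.5:11434|' .env
sed -i 's|^OLLAMA_MODEL=.*|OLLAMA_MODEL=llama3-chinese:8b|' .env
sed -i 's|^LANGCHAIN_CHAT_URL=.*|LANGCHAIN_CHAT_URL=http://10.0.0.6:7861|' .env
```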
3. From the deploy directory, run
```shell
docker compose up -d
```
Wait for the containers to finish starting. If startup does not succeed, simply retry the command above.
When startup completes, output similar to the following is printed:
```shell
[+] Running 5/5
 ✔ Container fu-hsi-mysql  Healthy  0.0s
 ✔ Container fu-hsi-minio  Running  0.0s
 ✔ Container fu-hsi-neo4j  Running  0.0s
 ✔ Container fu-hsi-web    Healthy  0.4s
 ✔ Container fu-hsi-nginx  Started
```
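If a container does not reach Healthy or Running, its log usually shows why; a quick sketch:

```shell
# Follow the backend service log (service names come from the compose files)
docker compose logs -f fu-hsi-web
# Or inspect one infrastructure container, e.g. MySQL
docker compose logs --tail=100 mysql
```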
4. Check the container status
```shell
$ docker compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
fu-hsi-minio quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z "/usr/bin/docker-ent…" minio 20 minutes ago Up 20 minutes 0.0.0.0:9000-9001->9000-9001/tcp
fu-hsi-mysql mysql:5.7 "docker-entrypoint.s…" mysql 20 minutes ago Up 20 minutes (healthy) 33060/tcp, 0.0.0.0:5415->3306/tcp
fu-hsi-neo4j neo4j:4.4 "tini -g -- /startup…" neo4j 20 minutes ago Up 20 minutes 0.0.0.0:7474->7474/tcp, 7473/tcp, 0.0.0.0:7687->7687/tcp
fu-hsi-nginx fu-hsi-nginx:1.0.0 "/docker-entrypoint.…" fu-hsi-nginx 20 minutes ago Up 17 minutes 0.0.0.0:80->80/tcp, 443/tcp
fu-hsi-web fu-hsi-web:1.0.0 "java -Xms256m -Xmx1…" fu-hsi-web 20 minutes ago Up 18 minutes (unhealthy) 0.0.0.0:9380->8097/tcp
```
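In the sample output above fu-hsi-web is reported as unhealthy; inspecting the recorded health checks usually explains it. A minimal sketch:

```shell
# Show the health-check history of the backend container
docker inspect --format '{{json .State.Health}}' fu-hsi-web
```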
## 🚀 Accessing the Service
Open http://ip:9978/case-management in a browser, replacing `ip` with the address of the deployment host.
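Reachability can also be checked from the command line before opening a browser (the port follows the URL above; it may differ in your nginx configuration):

```shell
# Expect an HTTP status line (200 or a redirect) from the front end
curl -I http://<server-ip>:9978/case-management
```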

@ -0,0 +1,77 @@
services:
  mysql:
    image: mysql:5.7
    container_name: fu-hsi-mysql
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
      - TZ=${TIMEZONE}
    command:
      --max_connections=100
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_general_ci
      --default-authentication-plugin=mysql_native_password
      --tls_version="TLSv1.2,TLSv1.3"
    ports:
      - ${MYSQL_PORT}:3306
    volumes:
      - ./mysql/data:/var/lib/mysql
      - ./init.sql:/docker-entrypoint-initdb.d/init.sql
    networks:
      - fu-hsi
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-uroot", "-p${MYSQL_PASSWORD}"]
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: always
  minio:
    image: quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z
    container_name: fu-hsi-minio
    command: server --console-address ":9001" /data
    ports:
      - ${MINIO_PORT}:9000
      - ${MINIO_CONSOLE_PORT}:9001
    environment:
      - MINIO_ROOT_USER=${MINIO_USER}
      - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD}
      - TZ=${TIMEZONE}
    volumes:
      - ./minio/data:/data
    networks:
      - fu-hsi
    # healthcheck:
    #   test: ["CMD", "curl -fs http://localhost/ || exit 1"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3
    #   start_period: 20s
    restart: always
  neo4j:
    image: neo4j:4.4
    container_name: fu-hsi-neo4j
    ports:
      - ${NEO4J_PORT_HTTP}:7474
      - ${NEO4J_PORT_BOLT}:7687
    environment:
      - NEO4J_AUTH=${NEO4J_USERNAME}/${NEO4J_PASSWORD}
    volumes:
      - ./neo4j/data:/data
      - ./neo4j/logs:/logs
      - ./neo4j/import:/var/lib/neo4j/import
      - ./neo4j/plugins:/var/lib/neo4j/plugins
    networks:
      - fu-hsi
    # healthcheck:
    #   test: ["CMD", "curl", "-sS", "http://localhost:7474"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 5
    #   start_period: 40s
    # restart: always
networks:
  fu-hsi:
    driver: bridge
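The base file above only defines the infrastructure services (MySQL, MinIO, Neo4j), so it can be validated and started on its own when debugging; a minimal sketch run from the deploy directory:

```shell
# Validate the file (prints nothing on success), then start only the base services
docker compose -f docker-compose-base.yml --env-file .env config --quiet
docker compose -f docker-compose-base.yml --env-file .env up -d
```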

@ -0,0 +1,49 @@
include:
  - path: ./docker-compose-base.yml
    env_file: ./.env
services:
  fu-hsi-web:
    depends_on:
      mysql:
        condition: service_started
      neo4j:
        condition: service_started
      minio:
        condition: service_started
    image: fu-hsi-web:1.0.0
    container_name: fu-hsi-web
    ports:
      - ${WEB_HTTP_PORT}:8097
    volumes:
      - ./fu-hsi-web/:/data/fu-hsi/web/
    environment:
      - TZ=${TIMEZONE}
      - spring.profiles.active=${SPRING_PROFILES_ACTIVE}
      - ollama.url=${OLLAMA_URL}
      - ollama.model=${OLLAMA_MODEL}
      - langChain-chat.url=${LANGCHAIN_CHAT_URL}
      # Note: despite its name, datasource.username carries the MySQL root password; application.yml references it as the datasource password.
      - datasource.username=${MYSQL_PASSWORD}
      - neo4j.username=${NEO4J_USERNAME}
      - neo4j.password=${NEO4J_PASSWORD}
      - minio.username=${MINIO_USER}
      - minio.password=${MINIO_PASSWORD}
    networks:
      - fu-hsi
    restart: always
  fu-hsi-nginx:
    depends_on:
      fu-hsi-web:
        condition: service_started
    image: fu-hsi-nginx:1.0.0
    container_name: fu-hsi-nginx
    ports:
      - ${NGINX_HTTP_PORT}:80
    volumes:
      - ./fu-hsi-nginx/html/:/usr/share/nginx/html/
    environment:
      - TZ=${TIMEZONE}
      - FU_HSI_SERVERS=${FU_HSI_SERVERS}
    networks:
      - fu-hsi
    restart: always
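All application settings reach fu-hsi-web as environment variables, so after changing .env the container has to be recreated for the new values to take effect; a sketch:

```shell
# Re-read .env and recreate only the backend container (the rest keep running)
docker compose up -d --force-recreate fu-hsi-web
```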

@ -1,3 +1,6 @@
# 文件目录说明
- /data/fu-hsi : 本项目根目录,本项目所有目录都在这个路径下面
构建步骤
- 运行Dockerfile文件构建镜像 docker build -t fu-hsi-web:1.0.0 .
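A sketch of building and packaging the image for offline delivery; the archive name is an assumption, not a fixed convention:

```shell
# Build the backend image from the Dockerfile in this directory
docker build -t fu-hsi-web:1.0.0 .
# Export it so it can be loaded with `docker load` on the target host
docker save fu-hsi-web:1.0.0 | gzip > fu-hsi-web.1.0.0.tar.gz
```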

@ -0,0 +1,44 @@
# Directory layout
- /data/ollama : project root for this deployment; everything lives under this path
# Prerequisites
- Docker installed on the host
- Nvidia GPU driver installed
- nvidia-container-toolkit installed
# Downloading a GGUF model
- Site: https://hf-mirror.com/
- Search for the model you want; for example, to download a llama3 8b model, search for: llama3 8b gguf
- Open a result marked gguf and download it. The k4_0 quantization level is recommended as the best trade-off between quality and speed. Make sure the downloaded file really has the .gguf extension.
# Modelfile
- Create the file: touch Modelfile
- Edit the Modelfile and paste the following content, adjusting the model name
~~~
FROM ./<name-of-the-model-to-deploy>.gguf
TEMPLATE """
{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}
{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}
<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>
"""
~~~
# Deploying with Docker
- First install the container toolkit (required; skip if it is already installed)
  1. Configure the repository: curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
  2. Install it: sudo yum install -y nvidia-container-toolkit
  3. Configure Docker: sudo nvidia-ctk runtime configure --runtime=docker
  4. Restart Docker: systemctl restart docker
- Deployment steps (consolidated into a runnable sketch after this list)
  1. Load the ollama image, version 0.2.8
     - Load locally: docker load -i ollama.tar.gz
  2. Run the container: sudo docker run -d --gpus=all -e NVIDIA_VISIBLE_DEVICES=all -v /data/ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
  3. Copy the model file to the local directory /data/ollama/gguf
  4. Create the Modelfile in /data/ollama/gguf and change the model name after FROM to the corresponding .gguf file name
  5. Enter the container: docker exec -it ollama bash
  6. Change into the directory: cd /root/.ollama/gguf
  7. Run: ollama create llama3-chinese:8b -f Modelfile (replace llama3-chinese:8b with whatever name you want to give the model)
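The deployment steps above, collected into one runnable sequence. This is a sketch: it assumes the GGUF model has already been downloaded to /data/ollama/gguf together with the Modelfile, and it keeps the llama3-chinese:8b name used elsewhere in this project:

```shell
# 1. Load the ollama image and start the container with GPU access
docker load -i ollama.tar.gz
sudo docker run -d --gpus=all -e NVIDIA_VISIBLE_DEVICES=all \
  -v /data/ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama

# 2. Register the GGUF model described by the Modelfile
docker exec -it ollama bash -c 'cd /root/.ollama/gguf && ollama create llama3-chinese:8b -f Modelfile'

# 3. Verify the model is listed through the API the backend will call
curl http://localhost:11434/api/tags
```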


@ -3,6 +3,7 @@ package com.supervision.common.utils;
import com.baomidou.mybatisplus.core.metadata.IPage;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@ -12,14 +13,14 @@ import static com.supervision.common.constant.Constants.TOTAL_COUNT;
public class IPages {
public static Map<String, Object> buildDataMap(IPage<?> iPage) {
Map<String, Object> dataMap = new HashMap<>();
Map<String, Object> dataMap = new LinkedHashMap<>();
dataMap.put(TOTAL_COUNT, iPage.getTotal());
dataMap.put(RESULT_LIST, iPage.getRecords());
return dataMap;
}
public static Map<String, Object> buildDataMap(List list, int total) {
Map<String, Object> dataMap = new HashMap();
Map<String, Object> dataMap = new LinkedHashMap<>();
dataMap.put(TOTAL_COUNT, total);
dataMap.put(RESULT_LIST, list);
return dataMap;

@ -1,9 +1,11 @@
package com.supervision.police.controller;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.supervision.common.domain.R;
import com.supervision.police.domain.CasePerson;
import com.supervision.police.domain.ModelCase;
import com.supervision.police.dto.IndexDetail;
import com.supervision.police.dto.ModelCaseBase;
import com.supervision.police.dto.ModelCaseDTO;
import com.supervision.police.service.ModelCaseService;
@ -27,6 +29,7 @@ public class ModelCaseController {
/**
*
*
* @param modelCase
* @param page
* @param size
@ -35,25 +38,27 @@ public class ModelCaseController {
@Operation(summary = "查询案件列表")
@PostMapping("/queryList")
public R<IPage<ModelCaseDTO>> queryList(@RequestBody ModelCaseVO modelCase,
@RequestParam(required = false, defaultValue = "1") Integer page,
@RequestParam(required = false, defaultValue = "20") Integer size) {
@RequestParam(required = false, defaultValue = "1") Integer page,
@RequestParam(required = false, defaultValue = "20") Integer size) {
IPage<ModelCaseDTO> modelCaseDTOIPage = modelCaseService.queryList(modelCase, page, size);
return R.ok(modelCaseDTOIPage);
}
/**
*
*
* @param caseNo
* @return
*/
@GetMapping("/checkCaseNo")
public R<?> checkCaseNo(@RequestParam @Parameter(name = "caseNo",description = "案件编号") String caseNo,
@RequestParam(required = false) @Parameter(name = "caseId",description = "案件id") String caseId) {
return modelCaseService.checkCaseNo(caseNo,caseId);
public R<?> checkCaseNo(@RequestParam @Parameter(name = "caseNo", description = "案件编号") String caseNo,
@RequestParam(required = false) @Parameter(name = "caseId", description = "案件id") String caseId) {
return modelCaseService.checkCaseNo(caseNo, caseId);
}
/**
*
*
* @param modelCaseBase
* @return
*/
@ -74,6 +79,7 @@ public class ModelCaseController {
/**
*
*
* @param id
* @return
*/
@ -84,6 +90,7 @@ public class ModelCaseController {
/**
*
*
* @param name
* @return
*/
@ -107,6 +114,7 @@ public class ModelCaseController {
/**
*
*
* @param file
* @return
*/
@ -117,16 +125,18 @@ public class ModelCaseController {
/**
*
*
* @param caseId
* @param indexType
* @return
*/
@PostMapping("/getIndexDetail")
public R<?> getIndexDetail(@RequestParam String caseId,
public R<IPage<IndexDetail>> getIndexDetail(@RequestParam String caseId,
@RequestParam String indexType,
@RequestParam(required = false, defaultValue = "1") Integer page,
@RequestParam(required = false, defaultValue = "20") Integer size) {
return modelCaseService.getIndexDetail(caseId, indexType, page, size);
IPage<IndexDetail> indexDetail = modelCaseService.getIndexDetail(caseId, indexType, page, size);
return R.ok(indexDetail);
}
}
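With this change the endpoint returns the page object wrapped in R directly instead of a prebuilt data map. A request sketch: the class-level request mapping is not shown in this diff, so <base-path> is a placeholder, and 9380 is WEB_HTTP_PORT from .env:

```shell
# Query paged index details for a case; the response body is R<IPage<IndexDetail>> as JSON
curl -X POST "http://<server-ip>:9380/<base-path>/getIndexDetail?caseId=<case-id>&indexType=<index-type>&page=1&size=20"
```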

@ -10,6 +10,8 @@ import java.util.Objects;
@Data
public class AtomicIndexDTO {
private String indexId;
private String atomicIndexId;
/**

@ -7,6 +7,8 @@ import java.util.List;
@Data
public class IndexDetail {
private String indexId;
/**
*
*/

@ -45,6 +45,7 @@ public interface ModelCaseMapper extends BaseMapper<ModelCase> {
@Param("indexType") String indexType);
List<AtomicIndexDTO> getAtomicDetail(@Param("caseId") String caseId,
@Param("indexId") String indexId,
@Param("atomicIds") List<String> atomicIds);
}

@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.extension.service.IService;
import com.supervision.common.domain.R;
import com.supervision.police.domain.CasePerson;
import com.supervision.police.domain.ModelCase;
import com.supervision.police.dto.IndexDetail;
import com.supervision.police.dto.ModelCaseBase;
import com.supervision.police.dto.ModelCaseDTO;
import com.supervision.police.vo.ModelCaseVO;
@ -45,7 +46,7 @@ public interface ModelCaseService extends IService<ModelCase> {
R<?> uploadCase(MultipartFile file);
R<?> getIndexDetail(String caseId, String indexType, Integer page, Integer size);
IPage<IndexDetail> getIndexDetail(String caseId, String indexType, Integer page, Integer size);
}

@ -352,7 +352,7 @@ public class ModelCaseServiceImpl extends ServiceImpl<ModelCaseMapper, ModelCase
}
@Override
public R<?> getIndexDetail(String caseId, String indexType, Integer page, Integer size) {
public IPage<IndexDetail> getIndexDetail(String caseId, String indexType, Integer page, Integer size) {
IPage<IndexDetail> iPage = new Page<>(page, size);
iPage = modelCaseMapper.getIndexDetail(iPage, caseId, indexType);
List<IndexDetail> records = iPage.getRecords();
@ -368,7 +368,7 @@ public class ModelCaseServiceImpl extends ServiceImpl<ModelCaseMapper, ModelCase
Map<String, String> indexJundgeLogicMap = parseLogicMap(judgeLogic);
String[] array = record.getAtomicIds().split(",");
List<String> atomicIds = Arrays.asList(array);
List<AtomicIndexDTO> atomics = modelCaseMapper.getAtomicDetail(caseId, atomicIds);
List<AtomicIndexDTO> atomics = modelCaseMapper.getAtomicDetail(caseId, record.getIndexId(), atomicIds);
for (AtomicIndexDTO atomic : atomics) {
// Must be consistent with the atomic indicator's judgement rule (resolves the conflict between incriminating and exonerating results)
String s = indexJundgeLogicMap.get(atomic.getAtomicIndexId());
@ -384,8 +384,7 @@ public class ModelCaseServiceImpl extends ServiceImpl<ModelCaseMapper, ModelCase
}
record.setChildren(atomics);
}
iPage.setRecords(records);
return R.ok(IPages.buildDataMap(iPage));
return iPage;
}
private Map<String, String> parseLogicMap(String judgeLogic) {

@ -0,0 +1,69 @@
spring:
  application:
    name: fu-hsi-server
  ai:
    ollama:
      base-url: ${ollama.url}
      chat:
        enabled: true
        options:
          model: ${ollama.model}
          # How long the model stays loaded in memory after a request (a longer value avoids reloading and speeds up processing)
          keep_alive: 30m
          # e.g. 0.3
          temperature: 0.8
          format: json
          # Reduces the chance of rambling output. A higher value (e.g. 100) gives more diverse answers; a lower value (e.g. 10) is more conservative.
          top-k: 90
          # Works together with top-k. A higher value (e.g. 0.95) produces more diverse text; a lower value (e.g. 0.5) produces more focused, conservative text.
          top-p: 0.95
          # Random seed controlling the randomness of the model output.
          seed: 1
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      driver-class-name: com.mysql.cj.jdbc.Driver
      url: jdbc:mysql://fu-hsi-mysql:3306/nx_llm?useUnicode=true&characterEncoding=utf-8&useSSL=true&nullCatalogMeansCurrent=true&serverTimezone=GMT%2B8
      username: root
      # datasource.username is injected by docker-compose and actually carries the MySQL root password
      password: ${datasource.username}
      initial-size: 5 # initial pool size
      min-idle: 10 # minimum number of idle connections
      max-active: 20 # maximum number of active connections
      max-wait: 60000 # maximum wait time when acquiring a connection (ms)
      min-evictable-idle-time-millis: 300000 # minimum time a connection may sit idle in the pool (ms)
      time-between-eviction-runs-millis: 60000 # interval between checks for idle connections to close (ms)
      filters: stat,wall # extension plugins: stat = monitoring statistics, log4j = logging, wall = firewall against SQL injection (removing it stops SQL statistics on the monitoring page)
      validation-query: SELECT 1 # SQL used to validate connections; if empty, the three test-* settings below have no effect
      test-on-borrow: true # run validationQuery when borrowing a connection (default true; enabling it reduces performance)
      test-on-return: true # run validationQuery when returning a connection (default false; enabling it reduces performance)
      test-while-idle: true # run validationQuery when a connection has been idle longer than timeBetweenEvictionRunsMillis (default false; recommended, does not affect performance)
      stat-view-servlet:
        enabled: false # whether to enable StatViewServlet
      filter:
        stat:
          enabled: true # whether to enable FilterStat (default true)
          log-slow-sql: true # whether to log slow SQL (default false)
          slow-sql-millis: 5000 # threshold for slow SQL in milliseconds (default 3000)
          merge-sql: false # merge monitoring data from multiple connection pools (default false)
  data:
    neo4j:
      database: neo4j
  neo4j:
    uri: bolt://fu-hsi-neo4j:7687
    authentication:
      username: ${neo4j.username}
      password: ${neo4j.password}
minio:
  endpoint: http://fu-hsi-minio:9002 # note: docker-compose-base.yml publishes the MinIO API on container port 9000; confirm this port matches your deployment
  accessKey: ${minio.username}
  secretKey: ${minio.password}
  bucketName: nxfuhsi
logging:
  level:
    org.springframework.ai: TRACE
langChain-chat:
  url:
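The two external endpoints this configuration depends on, ollama.url and langChain-chat.url, are injected from .env by docker-compose and can be probed from the host before starting the stack. A sketch (it assumes .env contains only simple KEY=VALUE lines, as it does here; adjust the path to wherever .env lives):

```shell
# Load the deployment variables and probe both services
. ./deploy/.env
curl -s "$OLLAMA_URL/api/tags" > /dev/null && echo "ollama reachable"
curl -s -o /dev/null -w 'langchain-chat HTTP %{http_code}\n' "$LANGCHAIN_CHAT_URL"
```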

@ -23,7 +23,8 @@
</foreach>
</if>
<if test="modelCase.involvedPerson != null and modelCase.involvedPerson != ''">
and ( law_actor like concat('%', #{modelCase.involvedPerson}, '%') or law_party like concat('%', #{modelCase.involvedPerson}, '%'))
and ( law_actor like concat('%', #{modelCase.involvedPerson}, '%') or law_party like concat('%',
#{modelCase.involvedPerson}, '%'))
</if>
<if test="modelCase.lawActor != null and modelCase.lawActor != ''">
and law_actor like concat('%', #{modelCase.lawActor}, '%')
@ -53,23 +54,29 @@
and mi.index_type = #{indexType}
</select>
<select id="getAtomicDetail" resultType="com.supervision.police.dto.AtomicIndexDTO">
select mai.id as atomicIndexId,mai.name as indexName,mai.index_source as indexSource,
select mar.index_id as indexId, mai.id as atomicIndexId,mai.name as indexName,mai.index_source as indexSource,
mar.atomic_result as atomicResult,
concat(nrs.question, nrs.answer) as record
from model_atomic_result mar
left join model_atomic_index mai on mar.atomic_id = mai.id
left join note_record_split nrs on mar.record_split_id = nrs.id
where mar.case_id = #{caseId} and mar.atomic_id in
where mar.case_id = #{caseId} and mar.index_id = #{indexId} and mar.atomic_id in
<foreach collection="atomicIds" item="item" open="(" close=")" separator=",">
#{item}
</foreach>
</select>
<select id="getIndexDetail" resultType="com.supervision.police.dto.IndexDetail">
select mi.name as indexName, mi.index_score as score, mir.index_result,mir.pre_result as preResult, mir.atomic_ids,mi.judge_logic as judgeLogic
select mi.id as indexId,
mi.name as indexName,
mi.index_score as score,
mir.index_result,
mir.pre_result as preResult,
mir.atomic_ids,
mi.judge_logic as judgeLogic
from model_index mi
left join model_index_result mir on ( mi.id = mir.index_id and mir.case_id = #{caseId} )
left join model_index_result mir on (mi.id = mir.index_id and mir.case_id = #{caseId})
WHERE mi.data_status = '1'
and mi.index_type = #{indexType}
order by mir.index_result desc
order by CASE mir.index_result WHEN 'true' THEN 1 WHEN 'false' THEN 2 else 0 END, mi.id desc
</select>
</mapper>
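The revised ORDER BY sorts rows whose index_result is neither 'true' nor 'false' (typically NULL, i.e. not yet evaluated) first, then true, then false, and finally by mi.id descending. A sketch for checking it against live data from the MySQL container (the root password is MYSQL_PASSWORD from .env, and <case-id> is a placeholder):

```shell
docker exec -i fu-hsi-mysql mysql -uroot -p12345678 nx_llm -e "
  SELECT mi.id, mir.index_result,
         CASE mir.index_result WHEN 'true' THEN 1 WHEN 'false' THEN 2 ELSE 0 END AS sort_key
  FROM model_index mi
  LEFT JOIN model_index_result mir ON (mi.id = mir.index_id AND mir.case_id = '<case-id>')
  WHERE mi.data_status = '1'
  ORDER BY sort_key, mi.id DESC;"
```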