From c669cbb12b81aac038f64f945090ed2c7223132f Mon Sep 17 00:00:00 2001
From: old-tom <892955278@msn.cn>
Date: Sun, 30 Mar 2025 10:02:58 +0800
Subject: [PATCH] feat: partial changes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.idea/.gitignore | 8 +
.idea/dictionaries | 6 +
.idea/encodings.xml | 6 +
.../inspectionProfiles/profiles_settings.xml | 6 +
.idea/llmFunctionCallDemo.iml | 8 +
.idea/misc.xml | 4 +
.idea/modules.xml | 8 +
.idea/vcs.xml | 6 +
env.toml | 36 +++
llm_agent.py | 211 ------------------
llmagent/__init__.py | 39 ++++
llmagent/llm_agent.py | 148 ++++++++++++
llmagent/llm_config.py | 49 ++++
llmtools/__init__.py | 15 ++
llmtools/basic_tool.py | 30 ---
llmtools/tool_impl.py | 80 +++++++
log_conf.py | 44 ++++
main.py | 21 +-
requirements.txt | 22 ++
vector_agent.py => vector_db.py | 2 +-
20 files changed, 500 insertions(+), 249 deletions(-)
create mode 100644 .idea/.gitignore
create mode 100644 .idea/dictionaries
create mode 100644 .idea/encodings.xml
create mode 100644 .idea/inspectionProfiles/profiles_settings.xml
create mode 100644 .idea/llmFunctionCallDemo.iml
create mode 100644 .idea/misc.xml
create mode 100644 .idea/modules.xml
create mode 100644 .idea/vcs.xml
create mode 100644 env.toml
delete mode 100644 llm_agent.py
create mode 100644 llmagent/__init__.py
create mode 100644 llmagent/llm_agent.py
create mode 100644 llmagent/llm_config.py
delete mode 100644 llmtools/basic_tool.py
create mode 100644 llmtools/tool_impl.py
create mode 100644 log_conf.py
rename vector_agent.py => vector_db.py (99%)
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/dictionaries b/.idea/dictionaries
new file mode 100644
index 0000000..3c30bb9
--- /dev/null
+++ b/.idea/dictionaries
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/encodings.xml b/.idea/encodings.xml
new file mode 100644
index 0000000..c2bae49
--- /dev/null
+++ b/.idea/encodings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/llmFunctionCallDemo.iml b/.idea/llmFunctionCallDemo.iml
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ b/.idea/llmFunctionCallDemo.iml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..7323cf7
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..b94d445
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/env.toml b/env.toml
new file mode 100644
index 0000000..be5b064
--- /dev/null
+++ b/env.toml
@@ -0,0 +1,36 @@
+[base]
+# Storage type for multi-turn chat history (memory: in-memory)
+history_chat_store = 'memory'
+# Similarity threshold
+similarity_threshold = 0.93
+
+####### Model configuration #######
+[siliconflow]
+# SiliconFlow
+# API key
+api_key = 'sk-rdoyeoxcyvqjynufqjmewmipwtvrhjjzerqlinpqxiodyafp'
+# Model name
+model = 'Qwen/QwQ-32B'
+# API base URL
+base_url = 'https://api.siliconflow.cn/v1/'
+# Maximum number of tokens
+max_tokens = 4096
+# Temperature
+temperature = 0.6
+# Whether to stream responses
+streaming = true
+
+[ark]
+# Volcengine Ark
+# API key
+api_key = '4eefc827-187f-4756-9637-7e0153c93d81'
+# Model name
+model = 'deepseek-r1-250120'
+# API base URL
+base_url = 'https://ark.cn-beijing.volces.com/api/v3/'
+# Maximum number of tokens
+max_tokens = 4096
+# Temperature
+temperature = 0.6
+# Whether to stream responses
+streaming = true
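+
+# To add another OpenAI-compatible provider, copy one of the sections above
+# (api_key, model, base_url, max_tokens, temperature, streaming) under a new
+# section name and load it with LLMConfigLoader.load(item_name='<section name>').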
diff --git a/llm_agent.py b/llm_agent.py
deleted file mode 100644
index 015be6a..0000000
--- a/llm_agent.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Time : 2025/3/16 09:46
-# @Author : old-tom
-# @File : llm_agent
-# @Project : llmFunctionCallDemo
-# @Desc : llm代理,负责初始化模型
-
-from langchain_openai import ChatOpenAI
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-
-# 默认系统提示词
-DEFAULT_SYS_PROMPT = ''
-
-parser = StrOutputParser()
-
-# 模型初始化,注意替换成自己的模型和api_key
-llm = ChatOpenAI(
- model='deepseek-r1-250120', api_key='4eefc827-187f-4756-9637-7e0153c93d81',
- base_url='https://ark.cn-beijing.volces.com/api/v3/', max_tokens=4096, temperature=0.5, streaming=True
-)
-
-# 工具函数定义,deepseek 官方定义的格式(最多支持 128 个 function),参考 https://api-docs.deepseek.com/zh-cn/guides/function_calling
-TOOLS = [
- {
- "type": "function",
- "function": {
- "name": "get_current_weather",
- "description": "Get weather of an location, the user shoud supply a location first",
- "parameters": {
- "type": "object",
- "properties": {
- "location": {
- "type": "string",
- "description": "The city and state, e.g. San Francisco, CA",
- }
- },
- "required": ["location"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "play_video",
- "description": "播放、查看、打开实时视频",
- "parameters": {
- "type": "object",
- "properties": {
- "camera_name": {
- "type": "string",
- "description": "相机名称,例如:南卡口1号相机",
- }
- },
- "required": ["camera_name"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "split_screen",
- "description": "切换分屏",
- "parameters": {
- "type": "object",
- "properties": {
- "split_n": {
- "type": "int",
- "description": "要切换的分屏数量,整数并且大于0,例如:1分屏、2分屏",
- }
- },
- "required": ["split_n"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "play_video_record",
- "description": "播放、打开录像",
- "parameters": {
- "type": "object",
- "properties": {
- "camera_name": {
- "type": "string",
- "description": "相机名称,例如:南卡口1号相机",
- },
- "start_time": {
- "type": "string",
- "description": "录像开始时间,格式为yyyy-MM-dd hh:mm:ss,例 2025-03-16 01:00:00",
- },
- "end_time": {
- "type": "string",
- "description": "录像结束时间,格式为yyyy-MM-dd hh:mm:ss,例 2025-03-16 02:09:31",
- }
- },
- "required": ["camera_name", "start_time", "end_time"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "switch_page",
- "description": "打开、跳转页面",
- "parameters": {
- "type": "object",
- "properties": {
- "page_name": {
- "type": "string",
- "description": "页面中文名称或者缩写,例:人员核查、系统日志、设备管理、首页",
- }
- },
- "required": ["page_name"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "zoom_in",
- "description": "放大电子地图",
- "parameters": {
- "type": "object",
- "properties": {
- "level_n": {
- "type": "int",
- "description": "放大等级,整数并且大于0小于5,例如:放大1级、放大2级",
- }
- },
- "required": ["level_n"]
- },
- }
- },
- {
- "type": "function",
- "function": {
- "name": "view_flight_details",
- "description": "查询指定机场指定航班及时间的出入境人员明细",
- "parameters": {
- "type": "object",
- "properties": {
- "airport_name": {
- "type": "string",
- "description": "机场名称,简体中文,可以是缩写,例如:成都天府机场、天府机场、长水机场、上海浦东机场",
- },
- "flight_code": {
- "type": "string",
- "description": "航班编号,由字母+数字组成的完整编号,若编号包含多余字符(如标点符号),需过滤后保留有效部分",
- },
- "flight_date": {
- "type": "string",
- "description": "提取完整日期(年月日),自动补零至标准格式 yyyy-MM-dd, 例:2025-03-16",
- },
- "ie_type": {
- "type": "string",
- "description": "出入境类型,仅识别'入境'或'出境'两种类型",
- }
- },
- "required": ["airport_name", "flight_code", "flight_date"]
- },
- }
- }
-]
-
-
-class DeepSeekR1Agent(object):
- def __init__(self, system_prompt: str = DEFAULT_SYS_PROMPT):
- """
- :param system_prompt: 系统提示词
- """
- self.prompt = ChatPromptTemplate(
- [
- ("system", system_prompt),
- ("human", "{user_input}")
- ]
- )
-
- def invoke(self, user_input: str):
- """
- 请求模型并一次性返回
- :param user_input: 用户输入
- :return:
- """
- chain = self.prompt | llm
- return chain.invoke({
- 'user_input': user_input
- }).content
-
- def invoke_by_stream(self, user_input: str):
- """
- 请求模型并流式返回(同步流)
- :param user_input: 用户输入
- :return:
- """
- chain = self.prompt | llm | parser
- response = chain.stream({'user_input': user_input})
- for chunk in response:
- print(chunk, flush=True, end='')
-
- @staticmethod
- def invoke_with_tool(user_input: str):
- """
- 工具链调用,function calling时system prompt不会生效
- :param user_input:
- :return: 这里返回的是LLM推理出的tool信息,格式如下:
- [{'name': 'get_current_weather', 'args': {'location': 'Beijing, China'}, 'id': 'call_xeeq4q52fw9x61lkrqwy9cr6', 'type': 'tool_call'}]
- """
- llm_with_tools = llm.bind_tools(TOOLS)
- return llm_with_tools.invoke(user_input).tool_calls
diff --git a/llmagent/__init__.py b/llmagent/__init__.py
new file mode 100644
index 0000000..864e68a
--- /dev/null
+++ b/llmagent/__init__.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time : 2025/3/29 08:28
+# @Author : old-tom
+# @File : __init__.py
+# @Project : llmFunctionCallDemo
+# @Desc : package initialization
+# Prompt templates
+PROMPT_TEMPLATE = {
+ 'VOICE_ASSISTANT': {
+ 'description': '语音助手',
+ 'template': """
+ 你是智能语音助手, 用户将会告诉你一条指令 {user_input}, 请根据指令调用工具完成用户任务。
+ # 要求
+ 1.你每条消息可以使用一个工具,你可以逐步使用工具来完成给定的任务,每次工具使用都会根据前一次工具使用的结果来决定。关键是你要考虑每个可用工具,并使用最适合任务当前步骤的工具
+ # 工具使用规则
+ 1. 根据指令和提供的工具描述选择最合适的工具,并仔细阅读工具参数说明,评估用户输入的参数是否满足条件,如果参数不满足则需要提示用户重新发出指令。
+ 2. 如果需要多个操作,则每条消息一次只使用一个工具来迭代完成任务,每次工具使用都基于上一次工具使用的结果。不要假设任何工具使用的结果。每一步都必须由前一步的结果来指导。
+ 3. 在每次使用工具后,切勿假设工具使用成功,禁止猜测工具结果。
+ 4. 根据工具返回的内容组装逻辑清晰的回答
+ 5. 如果工具返回多个结果,例如:找到以下相机,请选择一个:['北卡口入境摄像头出场1号通道', '北卡口出口道路监控', '北卡口入境摄像头出场2号通道']。
+ 需要将多个结果组装为询问句,例如:请确认您要查看的相机具体名称:
+ 1. 北卡口入境摄像头出场1号通道
+ 2. 北卡口出口道路监控
+ 3. 北卡口入境摄像头出场2号通道
+ 您需要选择哪个选项?(请回复选项前的数字)
+ 6. 如果工具返回单个结果,不要有多余输出及思考过程。强制格式为:正在执行xxx操作,请等待完成。
+ 7. 在任何情况下,都不要修改或扩展提供的工具参数
+
+ 逐步进行,在每次使用工具后等待用户的消息再继续任务至关重要。这种方法使你能够:
+ 1. 在继续之前确认每个步骤的成功。
+ 2. 立即解决出现的任何问题或错误。
+ 3. 根据新信息或意外结果调整你的方法。
+ 4. 确保每个操作都正确地基于前面的操作。
+ 通过等待并仔细考虑用户在每次使用工具后的回应,你可以相应地做出反应并做出关于如何继续任务的明智决定。这个迭代过程有助于确保你工作的整体成功和准确性。
+ 用户的指令是:{user_input}
+ """
+ }
+}
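+
+# Illustrative usage sketch: the template is plain str.format text, so a caller can do
+#   user_msg = PROMPT_TEMPLATE['VOICE_ASSISTANT']['template'].format(user_input='播放南卡口出境2号相机')
+# and wrap user_msg in a HumanMessage (see invoke_with_tool_call in llmagent/llm_agent.py).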
diff --git a/llmagent/llm_agent.py b/llmagent/llm_agent.py
new file mode 100644
index 0000000..a887015
--- /dev/null
+++ b/llmagent/llm_agent.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time : 2025/3/16 09:46
+# @Author : old-tom
+# @File : llm_agent
+# @Project : llmFunctionCallDemo
+# @Desc : LLM agent
+
+from llmagent.llm_config import LLMConfigLoader
+from abc import ABC
+from llmtools import TOOLS_BIND_FUNCTION, STRUCT_TOOLS
+from llmagent import PROMPT_TEMPLATE
+from langchain_openai import ChatOpenAI
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_core.messages import HumanMessage
+from log_conf import log
+
+# Default system prompt
+DEFAULT_SYS_PROMPT = ''
+
+parser = StrOutputParser()
+
+# Model initialization; adjust the settings in env.toml as needed
+# llm_conf = LLMConfigLoader.load(item_name='ark')
+llm_conf = LLMConfigLoader.load(item_name='siliconflow')
+llm = ChatOpenAI(
+ model=llm_conf.model, api_key=llm_conf.api_key,
+ base_url=llm_conf.base_url, max_tokens=llm_conf.max_tokens,
+ temperature=llm_conf.temperature,
+ streaming=llm_conf.streaming
+)
+
+# Chat history store (in-memory)
+his_store = {}
+
+
+def get_session_history(session_id: str) -> BaseChatMessageHistory:
+ """
+    Get the chat history for the given session
+    :param session_id: conversation session id
+    :return:
+ """
+ if session_id not in his_store:
+        # In-memory storage (can be replaced with a database or other backend; see implementations of BaseChatMessageHistory)
+ his_store[session_id] = InMemoryChatMessageHistory()
+ return his_store[session_id]
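+# Illustrative alternative (assumption, not used here): a persistent backend such as
+# langchain_community's SQLChatMessageHistory could replace InMemoryChatMessageHistory, e.g.
+#   his_store[session_id] = SQLChatMessageHistory(session_id=session_id,
+#                                                 connection_string='sqlite:///chat_history.db')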
+
+
+class BaseChatAgent(ABC):
+ """
+    Abstract chat agent base class
+ """
+
+ def __init__(self, system_prompt: str = DEFAULT_SYS_PROMPT):
+ """
+        :param system_prompt: system prompt
+ """
+        # Prompt template for single-turn conversations
+ self.prompt = ChatPromptTemplate(
+ [
+ ("system", system_prompt),
+ ("human", "{user_input}")
+ ]
+ )
+        # Prompt template for multi-turn conversations
+ self.multi_round_prompt = ChatPromptTemplate.from_messages([
+ ("system", system_prompt),
+ MessagesPlaceholder(variable_name="messages")
+ ])
+
+ def invoke(self, user_input: str) -> str:
+ """
+        Invoke the model and return the full response at once
+        :param user_input: user input
+ :return:
+ """
+ chain = self.prompt | llm
+ return chain.invoke({
+ 'user_input': user_input
+ }).content
+
+ def invoke_by_stream(self, user_input: str):
+ """
+        Invoke the model and stream the response (synchronous stream)
+        :param user_input: user input
+ :return:
+ """
+ chain = self.prompt | llm | parser
+ response = chain.stream({'user_input': user_input})
+ for chunk in response:
+ print(chunk, flush=True, end='')
+
+ def multi_round_with_stream(self, user_input: str, session_id: int):
+ """
+        Multi-turn conversation with streaming output
+        :param user_input: user input
+        :param session_id: conversation session id
+ :return:
+ """
+ config = {"configurable": {"session_id": session_id}}
+ chain = self.multi_round_prompt | llm | parser
+ with_message_history = RunnableWithMessageHistory(chain, get_session_history, input_messages_key="messages")
+ response = with_message_history.stream({
+ 'messages': [HumanMessage(content=user_input)]
+ }, config=config)
+ for chunk in response:
+ print(chunk, flush=True, end='')
+
+ @staticmethod
+ def invoke_with_tool(user_input: str):
+ """
+        Tool calling. The system prompt does not take effect during function calling, and streaming is not supported
+        :param user_input:
+        :return: the tool call info inferred by the LLM, in the following format:
+ [{'name': 'get_current_weather', 'args': {'location': 'Beijing, China'}, 'id': 'call_xeeq4q52fw9x61lkrqwy9cr6', 'type': 'tool_call'}]
+ """
+ llm_with_tools = llm.bind_tools(STRUCT_TOOLS)
+ return llm_with_tools.invoke(user_input).tool_calls
+
+ @staticmethod
+ def invoke_with_tool_call(user_input: str):
+ """
+        Single-turn conversation: execute the selected tool and feed its result back to the LLM
+ :param user_input:
+ :return:
+ """
+        # Custom prompt
+ user_msg = PROMPT_TEMPLATE.get('VOICE_ASSISTANT')['template'].format(user_input=user_input)
+ messages = [HumanMessage(user_msg)]
+ llm_with_tools = llm.bind_tools(STRUCT_TOOLS)
+        # This call decides which tool to use; the prompt must restrict the model from modifying the parameters
+ call_msg = llm_with_tools.invoke(user_input)
+ messages.append(call_msg)
+ for tool_call in call_msg.tool_calls:
+ selected_tool = TOOLS_BIND_FUNCTION[tool_call["name"].lower()]
+            # Invoking the tool with the tool_call produces a ToolMessage
+ tool_msg = selected_tool.invoke(tool_call)
+ messages.append(tool_msg)
+ log.info('【function call】构造输入为{}', messages)
+        # messages now contains the human message, the AI tool-call message, and the tool message
+ return llm_with_tools.invoke(messages).content
+
+
+class ChatAgent(BaseChatAgent):
+ pass
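+
+
+# Minimal usage sketch (illustrative; see main.py for the actual test calls):
+#   agent = ChatAgent()
+#   agent.invoke_with_tool_call('播放南卡口出境2号相机')
+#   agent.multi_round_with_stream('你是什么模型', 1)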
diff --git a/llmagent/llm_config.py b/llmagent/llm_config.py
new file mode 100644
index 0000000..fdf5179
--- /dev/null
+++ b/llmagent/llm_config.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time : 2025/3/29 08:49
+# @Author : old-tom
+# @File : llm_config
+# @Project : llmFunctionCallDemo
+# @Desc : LLM configuration file parsing
+import os
+
+from pydantic import BaseModel
+import toml
+
+# Default configuration file name
+DEFAULT_CONF_NAME = 'env.toml'
+path = os.path.dirname(__file__)
+path = os.path.dirname(path)
+# Default configuration file path
+DEFAULT_CONF_PATH = os.path.join(path, DEFAULT_CONF_NAME)
+
+
+class ConfigNotFoundError(Exception):
+ """
+    Raised when the configuration file cannot be found
+ """
+
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class LLMConf(BaseModel):
+ api_key: str
+ model: str
+ base_url: str
+ max_tokens: int
+ temperature: float
+ streaming: bool = True
+
+
+class LLMConfigLoader(object):
+ @staticmethod
+    def load(item_name: str, conf_path: str = DEFAULT_CONF_PATH) -> LLMConf:
+ """
+        Validate and load the configuration
+ :return:
+ """
+ if not os.path.isfile(conf_path):
+ raise ConfigNotFoundError(f'模型配置文件{DEFAULT_CONF_NAME}不存在')
+ conf = toml.load(conf_path)
+ return LLMConf(**conf[item_name])
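+
+# Usage sketch (illustrative; item_name must match a section in env.toml):
+#   conf = LLMConfigLoader.load(item_name='siliconflow')
+#   llm = ChatOpenAI(model=conf.model, api_key=conf.api_key, base_url=conf.base_url)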
\ No newline at end of file
diff --git a/llmtools/__init__.py b/llmtools/__init__.py
index dd8a46b..d06595a 100644
--- a/llmtools/__init__.py
+++ b/llmtools/__init__.py
@@ -5,3 +5,18 @@
# @File : __init__.py
# @Project : llmFunctionCallDemo
# @Desc : LLM function calling tools 定义
+
+from llmtools.tool_impl import *
+
+# Tool definitions, used when binding tools to the LLM
+STRUCT_TOOLS = [play_video, split_screen, play_video_record, switch_page, zoom_in, view_flight_details]
+
+# Mapping from tool name to the implementing function, used to dispatch tool calls
+TOOLS_BIND_FUNCTION = {
+ "play_video": play_video,
+ "split_screen": split_screen,
+ "play_video_record": play_video_record,
+ "switch_page": switch_page,
+ "zoom_in": zoom_in,
+ "view_flight_details": view_flight_details
+}
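+
+# Dispatch sketch (illustrative): given a tool_call produced by the model,
+#   tool = TOOLS_BIND_FUNCTION[tool_call["name"].lower()]
+#   tool_msg = tool.invoke(tool_call)  # yields a ToolMessage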
diff --git a/llmtools/basic_tool.py b/llmtools/basic_tool.py
deleted file mode 100644
index e0ceba7..0000000
--- a/llmtools/basic_tool.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Time : 2025/3/16 16:38
-# @Author : old-tom
-# @File : basic_tool
-# @Project : llmFunctionCallDemo
-# @Desc :
-
-import json
-from langchain_core.tools import tool
-
-@tool
-def get_current_weather(location: str, unit: str = "celsius") -> str:
- """Get the current weather in a given location
-
- Args:
- location (str): location of the weather.
- unit (str): unit of the tempuature.
-
- Returns:
- str: weather in the given location.
- """
-
- weather_info = {
- "location": location,
- "temperature": "27",
- "unit": unit,
- "forecast": ["sunny", "windy"],
- }
- return json.dumps(weather_info)
diff --git a/llmtools/tool_impl.py b/llmtools/tool_impl.py
new file mode 100644
index 0000000..1511786
--- /dev/null
+++ b/llmtools/tool_impl.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time : 2025/3/16 16:38
+# @Author : old-tom
+# @File : tool_impl
+# @Project : llmFunctionCallDemo
+# @Desc : Tool implementations. The @tool decorator is provided by LangChain; it includes an invoke method that can be called directly and automatically returns a ToolMessage. The drawback is that there is no custom exception handling.
+# There are other ways to define tools as well, see https://www.langchain.com.cn/docs/how_to/custom_tools/
+
+from typing import Annotated
+from langchain_core.tools import tool
+from vector_db import query_vector_db
+from log_conf import log
+
+
+@tool("play_video", description="播放、查看、打开实时视频")
+def play_video(camera_name: Annotated[str, "相机名称,例如:南卡口1号相机"]) -> str:
+ camera_info = query_camera_from_db(camera_name)
+ if camera_info:
+ if len(camera_info) > 1:
+            # TODO needs a multi-turn exchange to confirm the camera name
+ hit_camera_names = [x['carme_name'] for x in camera_info]
+ return f"找到以下相机,请选择一个:{hit_camera_names}"
+ else:
+            # TODO call the business system
+ return f"正在打开{camera_name},请等待操作完成"
+ else:
+ return "未找到该相机,请尝试其他名称"
+
+
+@tool("split_screen", description="切换分屏")
+def split_screen(split_n: Annotated[int, "要切换的分屏数量,整数并且大于0,例如:1分屏、2分屏"]) -> str:
+ return f"正在切换到{split_n}分屏,请等待操作完成"
+
+
+@tool("play_video_record", description="播放、打开录像")
+def play_video_record(camera_name: Annotated[str, "相机名称,例如:南卡口1号相机"],
+ start_time: Annotated[str, "录像开始时间,格式为yyyy-MM-dd hh:mm:ss,例 2025-03-16 01:00:00"],
+ end_time: Annotated[str, "录像结束时间,格式为yyyy-MM-dd hh:mm:ss,例 2025-03-16 02:09:31"]) -> str:
+    # TODO integrate with the business system; placeholder response for now
+    return f"正在打开{camera_name}从{start_time}到{end_time}的录像,请等待操作完成"
+
+
+@tool("switch_page", description="打开、跳转页面")
+def switch_page(page_name: Annotated[str, "页面中文名称或者缩写,例:人员核查、系统日志、设备管理、首页"]) -> str:
+    # TODO integrate with the business system; placeholder response for now
+    return f"正在打开{page_name}页面,请等待操作完成"
+
+
+@tool("zoom_in", description="放大电子地图")
+def zoom_in(level_n: Annotated[int, "放大等级,整数并且大于0小于5,例如:放大1级、放大2级"]) -> str:
+    # TODO integrate with the business system; placeholder response for now
+    return f"正在将地图放大{level_n}级,请等待操作完成"
+
+
+@tool("view_flight_details", description="查询指定机场指定航班及时间的出入境人员明细")
+def view_flight_details(
+ airport_name: Annotated[str, "机场名称,简体中文,可以是缩写,例如:成都天府机场、天府机场、长水机场、上海浦东机场"],
+ flight_code: Annotated[
+ str, "航班编号,由字母+数字组成的完整编号,若编号包含多余字符(如标点符号),需过滤后保留有效部分"],
+ flight_date: Annotated[str, "提取完整日期(年月日),自动补零至标准格式 yyyy-MM-dd, 例:2025-03-16"],
+ ie_type: Annotated[str, "出入境类型,仅识别'入境'或'出境'两种类型"]) -> str:
+ return f"{airport_name}航班号{flight_code}{flight_date}{ie_type}数据,共100人乘机,起飞准时,晚点降落"
+
+
+def query_camera_from_db(camera_name: str, top_n: int = 3) -> list:
+ """
+    Query the vector DB by camera name and return the top hit or the top N hits depending on the similarity threshold
+    :param camera_name: camera name
+    :param top_n: number of hits to return
+ :return:
+ """
+ rt = query_vector_db(camera_name)
+ if rt:
+ log.info('【function call】相机相似度检索查询 {},返回 {}', camera_name, rt)
+        # Check whether the best-matching camera exceeds the similarity threshold
+ top_one = rt['hits'][0]
+        # Similarity score
+ score = top_one['_score']
+        if score > 0.93:
+            return rt['hits'][0:1]
+        else:
+            return rt['hits'][0:top_n]
+    # No match found in the vector DB
+    return []
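+
+
+# Invocation sketch (illustrative): @tool-decorated functions are called via .invoke
+# with a dict of arguments, e.g.
+#   split_screen.invoke({"split_n": 2})
+#   play_video.invoke({"camera_name": '南卡口1号相机'})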
diff --git a/log_conf.py b/log_conf.py
new file mode 100644
index 0000000..4a26ad1
--- /dev/null
+++ b/log_conf.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Time : 2025/3/29 20:49
+# @Author : old-tom
+# @File : log_conf
+# @Project : llmFunctionCallDemo
+# @Desc : Logging configuration
+
+import sys
+import os
+from loguru import logger
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# Log output path
+LOG_PATH = os.path.join(BASE_DIR, r'logout/logout.log')
+
+
+class Logger(object):
+ def __init__(self):
+ self.logger = logger
+ self.logger.remove()
+ self.logger.add(sys.stdout,
+                        format="{time:YYYY-MM-DD HH:mm:ss} | "  # time
+                               "{process.name} | "  # process name
+                               "{thread.name} | "  # thread name
+                               "{module}.{function}"  # module.function
+                               ":{line} | "  # line number
+                               "{level}: "  # level
+                               "{message}",  # log message
+ )
+        # File output format; comment out the add() call below to disable writing logs to a file
+ self.logger.add(LOG_PATH, level='DEBUG',
+                        format='{time:YYYY-MM-DD HH:mm:ss} - '  # time
+                               "{process.name} | "  # process name
+                               "{thread.name} | "  # thread name
+                               '{module}.{function}:{line} - {level} - {message}',  # module.function:line - level - message
+ rotation="10 MB")
+
+ def get_logger(self):
+ return self.logger
+
+
+log = Logger().get_logger()
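+
+# Usage sketch (illustrative): import with "from log_conf import log"; loguru-style
+# brace formatting applies, e.g. log.info('query {} returned {}', name, result)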
diff --git a/main.py b/main.py
index 2de3621..cad3a8c 100644
--- a/main.py
+++ b/main.py
@@ -6,15 +6,16 @@
# @Project : llmFunctionCallDemo
# @Desc : llm代理,负责初始化模型
# Press the green button in the gutter to run the script.
-from llm_agent import DeepSeekR1Agent
+from llmagent.llm_agent import ChatAgent
+
+dsr = ChatAgent()
if __name__ == '__main__':
- dsr = DeepSeekR1Agent()
- # print(dsr.invoke_with_tool('今天北京天气怎么样'))
- ########## 测试 模型function call #########
- # print(dsr.invoke_with_tool('播放北卡口1号道相机'))
+    # print(dsr.invoke_with_tool_call('今天昆明天气怎么样'))
+    ########## Test: function call #########
+ # print(dsr.invoke_with_tool_call('播放南卡口出境2号相机'))
## [{'name': 'play_video', 'args': {'camera_name': '北卡口1号道相机'}, 'id': 'call_apnb8fiqdkaz313katcs3tjf', 'type': 'tool_call'}]
- # print(dsr.invoke_with_tool('将大屏切换为2分屏'))
+ # print(dsr.invoke_with_tool_call('将大屏切换为-1分屏'))
## [{'name': 'split_screen', 'args': {'split_n': 2}, 'id': 'call_2o7c94f591xag8p6lcyice9q', 'type': 'tool_call'}]
# print(dsr.invoke_with_tool('播放北卡口入境1号道录像,从今天到2025-03-16 02:09:31'))
## 由于大模型没有联网,所以无法判断‘今天’
@@ -29,5 +30,11 @@ if __name__ == '__main__':
## [{'name': 'switch_page', 'args': {'page_name': '系统日志'}, 'id': 'call_acwy2yk7xz3bgpt28suujca2', 'type': 'tool_call'}]
# print(dsr.invoke_with_tool('放大地图到2级'))
## [{'name': 'zoom_in', 'args': {'level_n': 2}, 'id': 'call_ip1kang0lbr63y8gya8nrpwc', 'type': 'tool_call'}]
- # print(dsr.invoke_with_tool('查看成都天府k00航班2004年1月1日入境预报航班人员明细'))
+ # print(dsr.invoke_with_tool_call('查看成都天府k00航班2004年1月1日入境预报航班人员明细'))
## [{'name': 'view_flight_details', 'args': {'airport_name': '成都天府机场', 'flight_code': 'K00', 'flight_date': '2004-01-01', 'ie_type': '入境'}, 'id': 'call_igummeorjq4r2pqjyr9tq6xq', 'type': 'tool_call'}]
+
+    ########## Test: multi-turn conversation #########
+ dsr.multi_round_with_stream('你是什么模型', 1)
+ dsr.multi_round_with_stream('你能做什么', 1)
+ dsr.multi_round_with_stream('我的上一个问题是什么?请直接返回问题,不要有多余输出及思考过程', 1)
+ dsr.multi_round_with_stream('我的第一个问题是什么?请直接返回问题,不要有多余输出及思考过程', 1)
diff --git a/requirements.txt b/requirements.txt
index 9609713..6523f27 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,33 +1,55 @@
+aiohappyeyeballs==2.6.1
+aiohttp==3.11.14
+aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.8.0
+async-timeout==4.0.3
+attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
+dataclasses-json==0.6.7
distro==1.9.0
exceptiongroup==1.2.2
+frozenlist==1.5.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
+httpx-sse==0.4.0
idna==3.10
jiter==0.9.0
jsonpatch==1.33
jsonpointer==3.0.0
+langchain==0.3.21
+langchain-community==0.3.20
langchain-core==0.3.45
langchain-openai==0.3.8
+langchain-text-splitters==0.3.7
langsmith==0.3.15
marqo==3.11.0
+marshmallow==3.26.1
+multidict==6.2.0
+mypy-extensions==1.0.0
+numpy==2.2.4
openai==1.66.3
orjson==3.10.15
packaging==24.2
+propcache==0.3.1
pydantic==2.10.6
+pydantic-settings==2.8.1
pydantic_core==2.27.2
+python-dotenv==1.1.0
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
sniffio==1.3.1
+SQLAlchemy==2.0.40
tenacity==9.0.0
tiktoken==0.9.0
+toml==0.10.2
tqdm==4.67.1
+typing-inspect==0.9.0
typing_extensions==4.12.2
urllib3==2.3.0
+yarl==1.18.3
zstandard==0.23.0
diff --git a/vector_agent.py b/vector_db.py
similarity index 99%
rename from vector_agent.py
rename to vector_db.py
index b5021c6..38ad4da 100644
--- a/vector_agent.py
+++ b/vector_db.py
@@ -619,7 +619,7 @@ def query_vector_db(query):
if __name__ == '__main__':
# create_and_set_index()
- rt = query_vector_db('H9861号口摄像头')
+ rt = query_vector_db('南卡口出境2号相机')
# TODO 根据 _score字段 取出相似度最高的结果
if rt:
for ele in rt['hits']: