MCP模型上下文协议实战:使用TKinter构建桌面AI助手
本文介绍了使用Python TKinter框架构建桌面AI助手应用,并集成MCP(Model Context Protocol)模型上下文协议。项目具备图形化对话界面、智能上下文管理、工具调用等功能。MCP协议为桌面应用提供了状态管理、离线能力、资源优化等优势。文章详细展示了MCP协议的核心实现,包括上下文数据结构设计和上下文管理器,支持多轮对话、会话切换和本地存储。通过dataclass定义的消息结构,实现了上下文的序列化与本地持久化。
·
MCP模型上下文协议实战:使用TKinter构建桌面AI助手

一、项目概述
在本文中,我们将使用Python的TKinter框架构建一个桌面AI助手应用,并集成MCP(Model Context Protocol)模型上下文协议。这个应用将具备以下功能:
- 图形化用户界面进行人机对话
- 智能上下文管理,支持多轮对话
- 工具调用功能(如计算器、天气查询等)
- 对话历史保存和会话管理
- 实时响应显示
二、MCP协议在桌面应用中的价值
对于TKinter桌面应用,MCP协议提供了以下优势:
- 状态管理:桌面应用需要维护复杂的用户交互状态
- 离线能力:可以在本地缓存上下文,支持断网续聊
- 资源优化:减少不必要的API调用,节省token成本
- 用户体验:提供流畅的多轮对话体验
三、项目结构设计
mcp_tkinter_assistant/
├── main.py # 主程序入口
├── mcp_protocol.py # MCP协议核心实现
├── ai_service.py # AI服务集成
├── tools.py # 工具函数集合
├── config.py # 配置文件
└── assets/ # 资源文件(图标、样式等)
四、MCP协议核心实现
4.1 MCP上下文数据结构
# mcp_protocol.py
from dataclasses import dataclass, field
from typing import List, Dict, Optional, Any
from enum import Enum
import json
import uuid
from datetime import datetime
class ContextType(Enum):
    """Kinds of MCP context a session can represent."""

    CHAT = "chat"      # free-form conversation
    TASK = "task"      # task-oriented workflow
    SEARCH = "search"  # search/retrieval session
@dataclass
class MCPMessage:
    """One message in an MCP conversation context."""

    role: str  # one of "user", "assistant", "system", "tool"
    content: str
    timestamp: datetime = field(default_factory=datetime.now)
    message_id: str = field(default_factory=lambda: str(uuid.uuid4()))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict; timestamp becomes ISO-8601."""
        serialized: Dict[str, Any] = {
            "role": self.role,
            "content": self.content,
        }
        serialized["timestamp"] = self.timestamp.isoformat()
        serialized["message_id"] = self.message_id
        return serialized
@dataclass
class MCPTool:
    """Declarative description of a callable tool exposed through MCP."""

    name: str                   # identifier the model uses in tool calls
    description: str            # human/LLM-readable purpose
    parameters: Dict[str, Any]  # JSON-schema style parameter spec
    enabled: bool = True        # disabled tools are skipped when exported
@dataclass
class MCPContext:
    """Top-level MCP context: one conversation session plus its state.

    Tracks the message history, the tools available to the model, free-form
    metadata/state dictionaries, and creation/update timestamps.
    """

    session_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    user_id: str = "desktop_user"
    context_type: ContextType = ContextType.CHAT
    messages: List[MCPMessage] = field(default_factory=list)
    tools: List[MCPTool] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)
    state: Dict[str, Any] = field(default_factory=dict)
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)

    def add_message(self, role: str, content: str) -> None:
        """Append a message to the history and bump the update timestamp."""
        self.messages.append(MCPMessage(role=role, content=content))
        self.updated_at = datetime.now()

    def get_messages_for_llm(self) -> List[Dict[str, str]]:
        """Return the history in chat-completion format.

        Only ``role`` and ``content`` are included: OpenAI-compatible chat
        APIs reject unknown keys, and the previous implementation leaked
        ``timestamp``/``message_id`` via ``MCPMessage.to_dict``.
        """
        return [{"role": msg.role, "content": msg.content} for msg in self.messages]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the full context (for persistence/inspection)."""
        return {
            "session_id": self.session_id,
            "user_id": self.user_id,
            "context_type": self.context_type.value,
            "messages": [msg.to_dict() for msg in self.messages],
            "tools": [tool.__dict__ for tool in self.tools],
            "metadata": self.metadata,
            "state": self.state,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat(),
        }
4.2 MCP上下文管理器
# mcp_protocol.py (续)
import os
import pickle
from pathlib import Path
class MCPContextManager:
    """MCP context manager optimized for a desktop app.

    Persists each session as a pickle file under ``storage_path`` and keeps
    all loaded sessions in memory for fast switching.

    NOTE(security): pickle deserializes arbitrary objects; only load context
    files produced by this application from a trusted directory.
    """

    def __init__(self, storage_path: str = "contexts"):
        self.storage_path = Path(storage_path)
        self.storage_path.mkdir(exist_ok=True)
        self.current_context: Optional[MCPContext] = None
        self.contexts: Dict[str, MCPContext] = {}
        self.load_all_contexts()

    def create_context(self, context_type: ContextType = ContextType.CHAT) -> MCPContext:
        """Create, register, persist and activate a fresh context."""
        context = MCPContext(context_type=context_type)
        # Seed every session with the assistant's system prompt.
        context.add_message("system",
            "你是一个桌面AI助手,帮助用户完成各种任务。你可以使用工具来执行具体操作。")
        self.contexts[context.session_id] = context
        self.current_context = context
        self.save_context(context)
        return context

    def switch_context(self, session_id: str) -> bool:
        """Activate the context with ``session_id``; False if unknown."""
        if session_id in self.contexts:
            self.current_context = self.contexts[session_id]
            return True
        return False

    def get_current_context(self) -> Optional[MCPContext]:
        """Return the active context (None before any has been created)."""
        return self.current_context

    def add_user_message(self, content: str) -> None:
        """Record a user message in the active context and persist it."""
        if self.current_context:
            self.current_context.add_message("user", content)
            self.save_context(self.current_context)

    def add_assistant_message(self, content: str) -> None:
        """Record an assistant message in the active context and persist it."""
        if self.current_context:
            self.current_context.add_message("assistant", content)
            self.save_context(self.current_context)

    def save_context(self, context: MCPContext) -> None:
        """Write one context to ``<storage_path>/<session_id>.pkl``."""
        filepath = self.storage_path / f"{context.session_id}.pkl"
        with open(filepath, 'wb') as f:
            pickle.dump(context, f)

    def load_context(self, session_id: str) -> Optional[MCPContext]:
        """Load one context from disk; None if missing or unreadable."""
        filepath = self.storage_path / f"{session_id}.pkl"
        if not filepath.exists():
            return None
        try:
            with open(filepath, 'rb') as f:
                context = pickle.load(f)
        except (pickle.UnpicklingError, EOFError, AttributeError, OSError):
            # A corrupted or truncated file previously crashed startup via
            # load_all_contexts(); skip it instead of propagating.
            return None
        self.contexts[session_id] = context
        return context

    def load_all_contexts(self) -> None:
        """Load every ``*.pkl`` session found in the storage directory."""
        for filepath in self.storage_path.glob("*.pkl"):
            # load_context() registers the session in self.contexts itself.
            self.load_context(filepath.stem)

    def get_context_list(self) -> List[Dict[str, Any]]:
        """Session summaries for the UI, newest first."""
        context_list = []
        for session_id, context in self.contexts.items():
            last_message = context.messages[-1].content if context.messages else "新会话"
            context_list.append({
                "session_id": session_id,
                "created_at": context.created_at.strftime("%Y-%m-%d %H:%M"),
                "last_message": last_message[:50] + "..." if len(last_message) > 50 else last_message,
                "message_count": len(context.messages)
            })
        # "%Y-%m-%d %H:%M" sorts lexicographically == chronologically.
        context_list.sort(key=lambda x: x["created_at"], reverse=True)
        return context_list
五、AI服务集成
# ai_service.py 国内GLM模型
from zhipuai import ZhipuAI
import json
from typing import List, Dict, Any, Optional
from mcp_protocol import MCPContext, MCPTool
class AIService:
    """AI service wrapper backed by ZhipuAI GLM-4."""

    def __init__(self, api_key: str):
        self.client = ZhipuAI(api_key=api_key)
        # Available tools; GLM-4 consumes an OpenAI-style tool schema.
        self.available_tools = [
            MCPTool(
                name="calculate",
                description="执行数学计算",
                parameters={
                    "type": "object",
                    "properties": {
                        "expression": {"type": "string", "description": "数学表达式"}
                    },
                    "required": ["expression"]
                }
            ),
            MCPTool(
                name="get_current_time",
                description="获取当前时间",
                parameters={
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            )
        ]

    def process_with_context(self, context: MCPContext) -> Dict[str, Any]:
        """Run one chat-completion round against GLM-4 using the MCP context.

        Returns ``{"success": True, "response": {...}, "usage": ...}`` on
        success or ``{"success": False, "error": str}`` on failure — callers
        never see an exception.
        """
        try:
            messages = self._prepare_messages(context)
            tools = self._convert_tools_for_glm()
            response = self.client.chat.completions.create(
                model="glm-4",  # or "glm-4-plus" / "glm-4-flash"
                messages=messages,
                tools=tools if tools else None,
                tool_choice="auto"
            )
            # GLM-4's response object mirrors the OpenAI SDK structure.
            message = response.choices[0].message
            tool_calls = None
            if message.tool_calls:
                tool_calls = [
                    {
                        # Prefer the OpenAI-compatible `id`; older SDK builds
                        # only expose `index`, so fall back to that.
                        "id": getattr(tool_call, "id", None) or tool_call.index,
                        "function": {
                            "name": tool_call.function.name,
                            "arguments": tool_call.function.arguments
                        }
                    }
                    for tool_call in message.tool_calls
                ]
            return {
                "success": True,
                "response": {
                    "role": message.role,
                    "content": message.content,
                    "tool_calls": tool_calls
                },
                "usage": response.usage
            }
        except Exception as e:
            # Boundary: surface any SDK/network failure as data, not a crash.
            return {
                "success": False,
                "error": str(e)
            }

    def _prepare_messages(self, context: MCPContext) -> List[Dict[str, Any]]:
        """Convert MCP messages into GLM-4 chat messages."""
        messages = []
        for msg in context.messages:
            if msg.role == "tool":
                # GLM-4 expects tool results as a dedicated "tool" message
                # carrying the originating tool_call_id.
                try:
                    tool_result = json.loads(msg.content)
                    messages.append({
                        "role": "tool",
                        "content": json.dumps(tool_result, ensure_ascii=False),
                        "tool_call_id": getattr(msg, 'tool_call_id', 'default_id')
                    })
                except (json.JSONDecodeError, TypeError):
                    # Non-JSON tool output: relay it as plain user text.
                    # (Previously a bare `except:` swallowed every error here.)
                    messages.append({
                        "role": "user",
                        "content": f"[工具结果] {msg.content}"
                    })
            else:
                messages.append({
                    "role": msg.role,
                    "content": msg.content
                })
        return messages

    def _convert_tools_for_glm(self) -> List[Dict[str, Any]]:
        """Export enabled MCP tools in GLM-4 (OpenAI-style) function format."""
        glm_tools = []
        for tool in self.available_tools:
            if tool.enabled:
                glm_tools.append({
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": tool.parameters
                    }
                })
        return glm_tools
# ai_service.py 使用openAI大模型
import openai
import json
from typing import List, Dict, Any, Optional
from mcp_protocol import MCPContext, MCPTool
class AIService:
    """AI service wrapper for the OpenAI Chat Completions API."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        # openai>=1.0 client. The legacy module-level
        # `openai.ChatCompletion.create` was removed in v1, and the
        # `tools`/`tool_choice` parameters used below only exist in the
        # new API, so the old call could never have worked.
        self.client = openai.OpenAI(api_key=api_key)
        # Tools exposed to the model (OpenAI function-calling schema).
        self.available_tools = [
            MCPTool(
                name="calculate",
                description="执行数学计算",
                parameters={
                    "type": "object",
                    "properties": {
                        "expression": {"type": "string", "description": "数学表达式"}
                    },
                    "required": ["expression"]
                }
            ),
            MCPTool(
                name="get_current_time",
                description="获取当前时间",
                parameters={
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            )
        ]

    def process_with_context(self, context: MCPContext) -> Dict[str, Any]:
        """Run one chat-completion round using the MCP context.

        Returns ``{"success": True, "response": ..., "usage": ...}`` or
        ``{"success": False, "error": str}`` — callers never see an
        exception.
        """
        try:
            messages = self._prepare_messages(context)
            tools = self._convert_tools_for_openai()
            response = self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages,
                tools=tools if tools else None,
                tool_choice="auto"
            )
            return {
                "success": True,
                "response": response.choices[0].message,
                "usage": response.usage
            }
        except Exception as e:
            # Boundary: surface any SDK/network failure as data, not a crash.
            return {
                "success": False,
                "error": str(e)
            }

    def _prepare_messages(self, context: MCPContext) -> List[Dict[str, str]]:
        """Convert MCP messages into plain role/content chat messages."""
        messages = []
        for msg in context.messages:
            messages.append({
                "role": msg.role,
                "content": msg.content
            })
        return messages

    def _convert_tools_for_openai(self) -> List[Dict[str, Any]]:
        """Export enabled MCP tools in OpenAI function-calling format."""
        openai_tools = []
        for tool in self.available_tools:
            if tool.enabled:
                openai_tools.append({
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": tool.parameters
                    }
                })
        return openai_tools
六、工具函数实现
# tools.py
import datetime
import re
from typing import Dict, Any
class ToolExecutor:
    """Dispatches and executes the tools the assistant may call."""

    @staticmethod
    def calculate(expression: str) -> Dict[str, Any]:
        """Safely evaluate an arithmetic expression.

        The expression is first stripped of everything except digits, the
        basic operators, parentheses, dots and whitespace, then evaluated by
        walking the AST. ``eval()`` is never used, so residue that slipped
        through the character filter (e.g. ``"().()"`` left over from a
        code-injection attempt) is rejected instead of executed.
        """
        try:
            cleaned_expr = re.sub(r'[^0-9+\-*/().\s]', '', expression)
            result = ToolExecutor._safe_eval(cleaned_expr)
            return {"success": True, "result": str(result)}
        except Exception as e:
            return {"success": False, "error": f"计算错误: {str(e)}"}

    @staticmethod
    def _safe_eval(expression: str):
        """Evaluate numeric literals and arithmetic operators only.

        Raises ValueError (or SyntaxError from parsing) for anything else.
        """
        import ast
        import operator

        bin_ops = {
            ast.Add: operator.add,
            ast.Sub: operator.sub,
            ast.Mult: operator.mul,
            ast.Div: operator.truediv,
            ast.Pow: operator.pow,  # "**" passes the character filter too
        }
        unary_ops = {ast.UAdd: operator.pos, ast.USub: operator.neg}

        def eval_node(node):
            if isinstance(node, ast.Expression):
                return eval_node(node.body)
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp) and type(node.op) in bin_ops:
                return bin_ops[type(node.op)](eval_node(node.left), eval_node(node.right))
            if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
                return unary_ops[type(node.op)](eval_node(node.operand))
            raise ValueError("不支持的表达式")

        return eval_node(ast.parse(expression, mode="eval"))

    @staticmethod
    def get_current_time() -> Dict[str, Any]:
        """Return the current local time as a formatted string."""
        now = datetime.datetime.now()
        return {
            "success": True,
            "result": now.strftime("%Y年%m月%d日 %H:%M:%S")
        }

    @staticmethod
    def execute_tool(tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch ``tool_name`` with ``arguments``; error dict if unknown."""
        if tool_name == "calculate":
            return ToolExecutor.calculate(arguments.get("expression", ""))
        elif tool_name == "get_current_time":
            return ToolExecutor.get_current_time()
        else:
            return {"success": False, "error": f"未知工具: {tool_name}"}
七、TKinter界面实现
7.1 主窗口类
# main.py
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
import threading
import json
from mcp_protocol import MCPContextManager, ContextType
from ai_service import AIService
from tools import ToolExecutor
import os
class MCPDesktopAssistant:
    """Main window of the MCP desktop AI assistant.

    Wires the Tk widgets to the MCP context manager, the AI service and the
    tool executor. Blocking AI calls run on a daemon worker thread, and all
    UI mutation is marshalled back to the Tk main loop via ``root.after``.
    """

    def __init__(self, root):
        self.root = root
        self.root.title("MCP桌面AI助手")
        self.root.geometry("800x600")
        self.root.minsize(600, 400)
        # Resolve the API key: environment variable first, then a dialog.
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            api_key = self._get_api_key_from_user()
        if not api_key:
            messagebox.showerror("错误", "需要OpenAI API密钥才能使用")
            root.quit()
            return
        self.context_manager = MCPContextManager()
        self.ai_service = AIService(api_key)
        self.tool_executor = ToolExecutor()
        # Start every launch with a fresh chat session.
        self.context_manager.create_context(ContextType.CHAT)
        self._create_ui()
        self.root.protocol("WM_DELETE_WINDOW", self._on_closing)

    def _get_api_key_from_user(self) -> str:
        """Prompt for the API key in a modal dialog; return '' on cancel."""
        dialog = tk.Toplevel(self.root)
        dialog.title("输入API密钥")
        dialog.geometry("400x150")
        dialog.transient(self.root)
        dialog.grab_set()  # modal: block interaction with the main window
        tk.Label(dialog, text="请输入OpenAI API密钥:").pack(pady=10)
        api_key_var = tk.StringVar()
        entry = tk.Entry(dialog, textvariable=api_key_var, width=50, show="*")
        entry.pack(pady=5)

        def on_ok():
            dialog.result = api_key_var.get()
            dialog.destroy()

        def on_cancel():
            dialog.result = ""
            dialog.destroy()

        button_frame = tk.Frame(dialog)
        button_frame.pack(pady=10)
        tk.Button(button_frame, text="确定", command=on_ok).pack(side=tk.LEFT, padx=5)
        tk.Button(button_frame, text="取消", command=on_cancel).pack(side=tk.LEFT, padx=5)
        self.root.wait_window(dialog)
        # "result" is only set by the callbacks; closing the dialog via the
        # window manager leaves it unset, hence the getattr default.
        return getattr(dialog, 'result', "")

    def _create_ui(self):
        """Build the two-pane layout: session list left, chat right."""
        main_frame = ttk.Frame(self.root)
        main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
        # Left pane: session history.
        left_frame = ttk.Frame(main_frame, width=200)
        left_frame.pack(side=tk.LEFT, fill=tk.Y, padx=(0, 10))
        left_frame.pack_propagate(False)  # keep the fixed 200px width
        ttk.Label(left_frame, text="会话历史", font=("Arial", 12, "bold")).pack(pady=(0, 10))
        self.session_listbox = tk.Listbox(left_frame, height=15)
        self.session_listbox.pack(fill=tk.BOTH, expand=True)
        self.session_listbox.bind('<<ListboxSelect>>', self._on_session_select)
        ttk.Button(left_frame, text="新会话", command=self._new_session).pack(pady=10)
        # Right pane: chat transcript plus input row.
        right_frame = ttk.Frame(main_frame)
        right_frame.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)
        self.chat_display = scrolledtext.ScrolledText(
            right_frame,
            wrap=tk.WORD,
            state=tk.DISABLED,  # read-only; enabled only while rendering
            font=("Arial", 10)
        )
        self.chat_display.pack(fill=tk.BOTH, expand=True, pady=(0, 10))
        input_frame = ttk.Frame(right_frame)
        input_frame.pack(fill=tk.X)
        self.user_input = tk.Text(input_frame, height=3, font=("Arial", 10))
        self.user_input.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=(0, 10))
        # Enter sends; Shift+Enter inserts a newline instead.
        self.user_input.bind("<Return>", self._on_enter_pressed)
        self.user_input.bind("<Shift-Return>", lambda e: self.user_input.insert(tk.INSERT, "\n"))
        send_button = ttk.Button(input_frame, text="发送", command=self._send_message)
        send_button.pack(side=tk.RIGHT)
        self._load_session_list()
        self._display_current_chat()

    def _load_session_list(self):
        """Refresh the session listbox and highlight the active session."""
        self.session_listbox.delete(0, tk.END)
        contexts = self.context_manager.get_context_list()
        for i, ctx in enumerate(contexts):
            display_text = f"{ctx['created_at']} - {ctx['last_message']}"
            self.session_listbox.insert(tk.END, display_text)
            if self.context_manager.current_context and ctx['session_id'] == self.context_manager.current_context.session_id:
                self.session_listbox.selection_set(i)

    def _new_session(self):
        """Create and activate a new chat session."""
        self.context_manager.create_context(ContextType.CHAT)
        self._load_session_list()
        self._display_current_chat()

    def _on_session_select(self, event):
        """Switch to the session the user clicked in the listbox."""
        selection = self.session_listbox.curselection()
        if selection:
            index = selection[0]
            contexts = self.context_manager.get_context_list()
            if index < len(contexts):
                session_id = contexts[index]['session_id']
                self.context_manager.switch_context(session_id)
                self._display_current_chat()

    def _display_current_chat(self):
        """Re-render the active session's transcript in the chat widget."""
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.delete(1.0, tk.END)
        context = self.context_manager.get_current_context()
        if context:
            for message in context.messages:
                # System messages are intentionally hidden from the transcript.
                if message.role == "user":
                    self._add_message_to_display("👤 用户", message.content, "blue")
                elif message.role == "assistant":
                    self._add_message_to_display("🤖 助手", message.content, "green")
                elif message.role == "tool":
                    self._add_message_to_display("🔧 工具", message.content, "purple")
        self.chat_display.config(state=tk.DISABLED)
        self.chat_display.see(tk.END)

    def _add_message_to_display(self, sender: str, content: str, color: str):
        """Append one message line to the chat widget.

        ``color`` is accepted for future per-role styling via text tags but
        is currently unused — all messages render in the default color.
        """
        self.chat_display.insert(tk.END, f"{sender}: {content}\n\n")

    def _on_enter_pressed(self, event):
        """Send on Enter; let Shift+Enter fall through to insert a newline."""
        if not event.state & 0x1:  # bit 0x1 == Shift modifier
            self._send_message()
            return "break"  # suppress the default newline insertion

    def _send_message(self):
        """Take the input box contents and hand them to the AI worker."""
        user_message = self.user_input.get(1.0, tk.END).strip()
        if not user_message:
            return
        self.user_input.delete(1.0, tk.END)
        self.context_manager.add_user_message(user_message)
        self._display_current_chat()
        # Run the (blocking) AI round-trip off the Tk main thread.
        threading.Thread(target=self._process_ai_response, args=(user_message,), daemon=True).start()

    def _process_ai_response(self, user_message: str):
        """Call the AI service and run any requested tools (worker thread)."""
        try:
            context = self.context_manager.get_current_context()
            if not context:
                return
            result = self.ai_service.process_with_context(context)
            if result["success"]:
                response_message = result["response"]
                if response_message.get("tool_calls"):
                    # Execute each requested tool and record its result as a
                    # proper MCP "tool" message. (The previous implementation
                    # fabricated pseudo-objects via type() and referenced the
                    # never-imported datetime/uuid modules, so any tool call
                    # raised NameError at runtime.)
                    for tool_call in response_message["tool_calls"]:
                        tool_name = tool_call["function"]["name"]
                        arguments = json.loads(tool_call["function"]["arguments"])
                        tool_result = self.tool_executor.execute_tool(tool_name, arguments)
                        context.add_message("tool", json.dumps(tool_result))
                    # Second round: let the model phrase a final answer from
                    # the tool output now present in the context.
                    final_result = self.ai_service.process_with_context(context)
                    if final_result["success"]:
                        assistant_content = final_result["response"].get("content", "")
                    else:
                        assistant_content = "处理工具调用时发生错误。"
                else:
                    assistant_content = response_message.get("content", "")
                self.root.after(0, self._update_ui_with_response, assistant_content)
            else:
                error_msg = f"AI服务错误: {result['error']}"
                self.root.after(0, self._update_ui_with_response, error_msg)
        except Exception as e:
            # Last-resort guard: report the failure in the chat instead of
            # silently killing the worker thread.
            error_msg = f"处理消息时发生错误: {str(e)}"
            self.root.after(0, self._update_ui_with_response, error_msg)

    def _update_ui_with_response(self, response: str):
        """Record the assistant reply and refresh the UI (main thread)."""
        self.context_manager.add_assistant_message(response)
        self._display_current_chat()
        self._load_session_list()  # last-message previews changed

    def _on_closing(self):
        """Confirm before destroying the main window."""
        if messagebox.askokcancel("退出", "确定要退出MCP桌面AI助手吗?"):
            self.root.destroy()
def main():
    """Application entry point: create the Tk root and run the event loop."""
    root = tk.Tk()
    app = MCPDesktopAssistant(root)  # keep a reference for the app's lifetime
    root.mainloop()


if __name__ == "__main__":
    main()
7.2 配置文件
# config.py
import os
from pathlib import Path
class Config:
    """Central application configuration."""

    # --- API ---
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")

    # --- Paths ---
    CONTEXT_STORAGE_PATH = "mcp_contexts"
    LOG_PATH = "logs"

    # --- Application ---
    WINDOW_WIDTH = 800
    WINDOW_HEIGHT = 600
    MAX_CONTEXT_MESSAGES = 20  # cap on messages kept per context

    @classmethod
    def ensure_directories(cls):
        """Create the storage and log directories if they do not exist."""
        for directory in (cls.CONTEXT_STORAGE_PATH, cls.LOG_PATH):
            Path(directory).mkdir(exist_ok=True)
八、使用说明
8.1 环境准备
# 安装依赖
pip install openai #选用国外模型
pip install zhipuai #选用国内模型
# 设置环境变量(可选)
export OPENAI_API_KEY="your-api-key-here"
export ZHIPU_API_KEY="your-api-key-here"
8.2 运行应用
python main.py
首次运行时若未设置环境变量,会弹出对话框要求输入API密钥(使用OpenAI或智谱模型取决于所选的ai_service实现)。
8.3 功能演示
- 基本对话:输入普通问题,如"你好"、"今天天气怎么样?"
- 数学计算:输入"计算 15 * 24 + 78",助手会调用计算器工具
- 时间查询:输入"现在几点了?",助手会返回当前时间
- 多轮对话:上下文会自动维护,支持连续对话
- 会话管理:左侧可以查看和切换不同会话
九、优化建议
9.1 性能优化
- 上下文压缩:实现智能上下文摘要,减少token使用
- 缓存机制:缓存常用响应,减少API调用
- 异步处理:使用asyncio替代threading获得更好性能
9.2 功能扩展
- 更多工具:添加文件操作、网络搜索等功能
- 语音输入:集成语音识别功能
- 主题切换:支持深色/浅色主题
- 导出功能:支持导出对话记录
9.3 安全性增强
- API密钥加密存储:使用系统密钥环存储敏感信息
- 输入过滤:防止恶意输入
- 隐私保护:提供本地模式选项
十、总结
通过这个TKinter桌面应用项目,我们成功地将MCP模型上下文协议集成到了图形化界面中。这个实现展示了:
- MCP协议的实用性:为桌面应用提供了结构化的上下文管理
- TKinter的灵活性:能够构建功能完整的AI应用界面
- 工具调用的集成:实现了完整的工具调用工作流
- 离线能力:本地存储上下文,支持断网使用
这个项目不仅是一个实用的桌面AI助手,也为其他TKinter开发者提供了MCP集成的参考模板。通过MCP协议,我们可以构建更加智能、高效和用户友好的桌面AI应用。
火山引擎开发者社区是火山引擎打造的AI技术生态平台,聚焦Agent与大模型开发,提供豆包系列模型(图像/视频/视觉)、智能分析与会话工具,并配套评测集、动手实验室及行业案例库。社区通过技术沙龙、挑战赛等活动促进开发者成长,新用户可领50万Tokens权益,助力构建智能应用。
更多推荐
所有评论(0)