Python+MCP+LLM
Using a locally deployed Ollama instance to call tools wrapped with MCP.
A first look at the server and the client
MCP server
from fastmcp import FastMCP
import requests
import json

mcp = FastMCP()

def post_json(url: str, payload: dict):
    # Helper: POST a JSON payload and return the response pretty-printed as JSON.
    try:
        response = requests.post(url, json=payload)
        return json.dumps(response.json(), ensure_ascii=False, indent=2)
    except Exception as e:
        return f"❌ Request failed: {e}"

@mcp.tool(description="Get the weather for a city")
def get_weather(city: str):
    return f"The weather in {city} is nice"

@mcp.tool(description="Read a local text file; the input is a file path")
def read_text_tool(file_path: str) -> str:
    return post_json("http://localhost:8000/read_text", {"file_path": file_path})

@mcp.tool(description="Read the text content of a PDF file; the input is a file path")
def read_pdf_tool(file_path: str) -> str:
    return post_json("http://localhost:8000/read_pdf", {"file_path": file_path})

@mcp.tool(description="Preview the first page(s) of a PDF as an image; the input is a file path")
def preview_pdf_tool(file_path: str, max_pages: int = 1) -> str:
    return post_json("http://localhost:8000/preview_pdf", {
        "file_path": file_path,
        "max_pages": max_pages
    })

if __name__ == '__main__':
    mcp.run()
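The three file tools above just forward to an HTTP service on localhost:8000 that is not shown here. For completeness, a minimal sketch of what such a backend could look like, assuming FastAPI and pypdf; only the endpoint paths and the file_path field come from the server code above, everything else is an assumption (the /preview_pdf endpoint is omitted for brevity):

# file_service.py - hypothetical backend for the /read_text and /read_pdf
# endpoints used above (a sketch, not the original post's implementation)
from fastapi import FastAPI
from pydantic import BaseModel
from pypdf import PdfReader

app = FastAPI()

class FileRequest(BaseModel):
    file_path: str

@app.post("/read_text")
def read_text(req: FileRequest):
    # Return the raw contents of a UTF-8 text file.
    with open(req.file_path, encoding="utf-8") as f:
        return {"content": f.read()}

@app.post("/read_pdf")
def read_pdf(req: FileRequest):
    # Concatenate the extracted text of every page.
    reader = PdfReader(req.file_path)
    return {"content": "\n".join(page.extract_text() or "" for page in reader.pages)}

# Run with: uvicorn file_service:app --port 8000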
MCP Client
import asyncio
from fastmcp import Client
from server import mcp

async def run():
    # Passing the server object directly uses FastMCP's in-memory transport,
    # so no separate server process is needed.
    client = Client(mcp)
    async with client:
        tools = await client.list_tools()
        tool = tools[0]  # the first registered tool, get_weather
        result = await client.call_tool(tool.name, {"city": "nanchang"})
        print(result)

if __name__ == '__main__':
    asyncio.run(run())
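A side note on transports: because Client(mcp) receives the server object itself, everything runs in one process, which is convenient for testing. To talk to the server as a separate process instead, FastMCP can (as far as I know) infer a stdio transport from a script path; a sketch under that assumption:

# Sketch: connecting to server.py as a separate stdio subprocess
# (assumes fastmcp infers a stdio transport from a .py path)
import asyncio
from fastmcp import Client

async def run():
    async with Client("server.py") as client:
        result = await client.call_tool("get_weather", {"city": "nanchang"})
        print(result)

asyncio.run(run())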
Going further
Now let's have a locally deployed Ollama model actually call the MCP-wrapped tools.
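The flow has three steps: the client advertises the MCP tools to the model in OpenAI's function-calling format; when the model decides a tool is needed, it answers not with text but with a tool_calls entry; the client then executes that call through the MCP client and sends the result back so the model can produce the final answer. For reference, a single tool call in an OpenAI-compatible response has roughly this shape (the id and arguments below are made-up examples):

# Illustrative shape of one tool call returned by an OpenAI-compatible API
tool_call = {
    "id": "call_0",
    "type": "function",
    "function": {
        "name": "get_weather",
        "arguments": '{"city": "Qingdao"}'  # a JSON string, must be parsed
    }
}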
import asyncio
import json
from typing import Dict, List
from fastmcp import Client
from server import mcp
from openai import OpenAI

class UserClient:
    def __init__(self, model="qwen3:0.6b"):
        self.model = model
        self.mcp_client = Client(mcp)
        # Ollama exposes an OpenAI-compatible API; the key is unused but required.
        self.openai_client = OpenAI(
            base_url="http://127.0.0.1:11434/v1",
            api_key="None"
        )
        self.messages = [{
            "role": "system",
            "content": "You are an AI assistant. Use the available tools to answer the user's questions."
        }]
        self.tools = []

    async def prepare_tools(self):
        # Convert MCP tool metadata to OpenAI's function-calling format.
        # Note: OpenAI expects the key "parameters", not "input_schema".
        tools = await self.mcp_client.list_tools()
        return [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema
                }
            }
            for tool in tools
        ]

    async def chat(self, messages: List[Dict]):
        async with self.mcp_client:
            if not self.tools:
                self.tools = await self.prepare_tools()
            response = self.openai_client.chat.completions.create(
                model=self.model,
                messages=messages,
                tools=self.tools
            )
            reply = response.choices[0].message
            # If the model requested tools, execute them through MCP,
            # append the results, and ask the model again for a final answer.
            if reply.tool_calls:
                messages.append(reply)
                for tool_call in reply.tool_calls:
                    arguments = json.loads(tool_call.function.arguments)
                    result = await self.mcp_client.call_tool(
                        tool_call.function.name, arguments
                    )
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": str(result)
                    })
                response = self.openai_client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    tools=self.tools
                )
                reply = response.choices[0].message
            return reply

    async def loop(self):
        # Interactive multi-turn chat; the history lives in self.messages.
        while True:
            question = input("user: ")
            self.messages.append({"role": "user", "content": question})
            reply = await self.chat(self.messages)
            print("AI:", reply.content)

async def main():
    user_client = UserClient()
    reply = await user_client.chat([
        {"role": "user", "content": "What's the weather like in Qingdao today?"}
    ])
    print("AI:", reply.content)

if __name__ == '__main__':
    asyncio.run(main())
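Before running this, the model must exist locally (ollama pull qwen3:0.6b) and Ollama must be serving on its default port 11434. A quick smoke test of the OpenAI-compatible endpoint, with no tools involved:

# Smoke test for the local Ollama endpoint; assumes the model has already
# been pulled with `ollama pull qwen3:0.6b`.
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:11434/v1", api_key="None")
resp = client.chat.completions.create(
    model="qwen3:0.6b",
    messages=[{"role": "user", "content": "Say hello"}]
)
print(resp.choices[0].message.content)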