一、三种传输方式详解
1. Stdio 传输
1.1 原理说明
Stdio(标准输入输出)传输是最简单的传输方式。MCP Client 将Server 作为 子进程 启动,通过进程的 stdin 和 stdout 进行双向通信。
关键规则:
- 每条 JSON-RPC 消息以换行符 `\n` 分隔
- stdout 专用于协议消息,日志/调试信息必须输出到 stderr
- Client 与 Server 一对一绑定,生命周期同步
1.2 原理图(draw.io)
1.3 交互流程图(draw.io 时序图)
1.4 代码示例
Server 端: stdio_server.py
# stdio_server.py — MCP demo server: two tools, three resources and two prompts,
# served over stdio (the default transport for mcp.run()).
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.prompts import base

import logging
import json

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Create the MCP server instance.
mcp = FastMCP(name="stdio-demo")


# A simple greeting tool (note: this is a tool, not a resource).
@mcp.tool()
def greeting(name: str = "World") -> str:
    """Return a greeting message."""
    return f"Hello, {name}!"


# Calculator tool.
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


@mcp.resource("models://")
def get_models() -> str:
    """Get information about available AI models"""
    logger.info("Retrieving available models")
    models_data = [
        {
            "id": "gpt-4",
            "name": "GPT-4",
            "description": "OpenAI's GPT-4 large language model"
        },
        {
            "id": "llama-3-70b",
            "name": "LLaMA 3 (70B)",
            "description": "Meta's LLaMA 3 with 70 billion parameters"
        },
        {
            "id": "claude-3-sonnet",
            "name": "Claude 3 Sonnet",
            "description": "Anthropic's Claude 3 Sonnet model"
        }
    ]

    return json.dumps({"models": models_data})


# Define a greeting resource that dynamically constructs a personalized greeting
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Return a greeting for the given name

    Args:
        name: The name to greet

    Returns:
        A personalized greeting message
    """
    import urllib.parse
    # Decode URL-encoded name
    decoded_name = urllib.parse.unquote(name)
    logger.info(f"Generating greeting for {decoded_name}")
    return f"Hello, {decoded_name}!"


@mcp.resource("file://documents/{name}")
def read_document(name: str) -> str:
    """Read a document by name."""
    # This would normally read from disk
    return f"Content of {name}"


# Single-message prompt template.
@mcp.prompt(title="Code Review")
def review_code(code: str) -> str:
    return f"Please review this code:\n\n{code}"


# Multi-message prompt template (user/assistant conversation seed).
@mcp.prompt(title="Debug Assistant")
def debug_error(error: str) -> list[base.Message]:
    return [
        base.UserMessage("I'm seeing this error:"),
        base.UserMessage(error),
        base.AssistantMessage("I'll help debug that. What have you tried so far?"),
    ]


if __name__ == "__main__":
    # No transport argument: stdio is the default.
    mcp.run()
Client 端: stdio_client.py
# stdio_client.py — MCP client demo: spawns the stdio server as a subprocess
# and exercises its tools, resources and prompts.
import sys
import urllib.parse  # bug fix: was `import urllib`, which does not reliably expose urllib.parse

from mcp import stdio_client, StdioServerParameters
from mcp.client.session import ClientSession

import asyncio
import logging
import json

from mcp.types import TextContent, TextResourceContents

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def main():
    """Main client function that demonstrates MCP client features."""
    logger.info("Starting clean MCP client")

    try:
        logger.info("Connecting to server...")
        params = StdioServerParameters(
            command="python",             # Executable
            args=["stdio_server.py"],     # Server script (bug fix: was "sse-server.py")
            env=None,                     # Optional environment variables
        )
        async with stdio_client(params) as (reader, writer):
            async with ClientSession(reader, writer) as session:
                logger.info("Initializing session")
                await session.initialize()

                # 1. Call the add tool
                logger.info("Testing calculator tool")
                add_result = await session.call_tool("add", arguments={"a": 5, "b": 7})
                if add_result and add_result.content:
                    text_content = next((content for content in add_result.content
                                         if isinstance(content, TextContent)), None)
                    if text_content:
                        print(f"\n1. Calculator result (5 + 7) = {text_content.text}")

                # 2. Get models resource
                logger.info("Testing models resource")
                models_response = await session.read_resource("models://")
                if models_response and models_response.contents:
                    text_resource = next((content for content in models_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        models = json.loads(text_resource.text)
                        # Renumbered for consistency (originally printed "3.")
                        print("\n2. Available models:")
                        for model in models.get("models", []):
                            print(f"  - {model['name']} ({model['id']}): {model['description']}")

                # 3. Get greeting resource (URL-encode the name for the URI template)
                logger.info("Testing greeting resource")
                name = "MCP Explorer"
                encoded_name = urllib.parse.quote(name)
                greeting_response = await session.read_resource(f"greeting://{encoded_name}")
                if greeting_response and greeting_response.contents:
                    text_resource = next((content for content in greeting_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print(f"\n3. Greeting: {text_resource.text}")

                # 4. Get document resource
                logger.info("Testing document resource")
                document_response = await session.read_resource("file://documents/example.txt")
                if document_response and document_response.contents:
                    text_resource = next((content for content in document_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print("\n4. Document content:")
                        print(f"   {text_resource.text}")

                # 5. Use code review prompt
                logger.info("Testing code review prompt")
                sample_code = "def hello_world():\n    print('Hello, world!')"
                prompt_response = await session.get_prompt("review_code", {"code": sample_code})
                if prompt_response and prompt_response.messages:
                    message = next((msg for msg in prompt_response.messages if msg.content), None)
                    if message and message.content:
                        text_content = next((content for content in [message.content]
                                             if isinstance(content, TextContent)), None)
                        if text_content:
                            print("\n5. Code review prompt:")
                            print(f"   {text_content.text}")

    except Exception:
        logger.exception("An error occurred")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
2. SSE 传输
2.1 原理说明
SSE (Server-Sent Events) 传输基于 HTTP,使用 两个通道 实现双向通信:
| 通道 | 方向 | 方法 | 作用 |
|---|---|---|---|
| /sse | Server → Client | GET | 持久 SSE 长连接,服务器推送消息 |
| /messages | Client → Server | POST | 客户端发送 JSON-RPC 请求 |
⚠️ SSE 传输在MCP 协议 2025-03-26 版本中已被标记为 deprecated(弃用) ,推荐使用 Streamable HTTP 替代。
2.2 原理图 (draw.io)
2.3 交互流程图 (draw.io)
2.4 代码示例
Server 端: sse_server.py
# sse_server.py — MCP demo server: two tools, three resources and two prompts,
# served over the (deprecated) SSE transport on port 8082.
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.prompts import base

import logging
import json

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Create the MCP server instance; the SSE endpoint will listen on this port.
mcp = FastMCP(name="sse-demo", port=8082)


# A simple greeting tool (note: this is a tool, not a resource).
@mcp.tool()
def greeting(name: str = "World") -> str:
    """Return a greeting message."""
    return f"Hello, {name}!"


# Calculator tool.
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


@mcp.resource("models://")
def get_models() -> str:
    """Get information about available AI models"""
    logger.info("Retrieving available models")
    models_data = [
        {
            "id": "gpt-4",
            "name": "GPT-4",
            "description": "OpenAI's GPT-4 large language model"
        },
        {
            "id": "llama-3-70b",
            "name": "LLaMA 3 (70B)",
            "description": "Meta's LLaMA 3 with 70 billion parameters"
        },
        {
            "id": "claude-3-sonnet",
            "name": "Claude 3 Sonnet",
            "description": "Anthropic's Claude 3 Sonnet model"
        }
    ]

    return json.dumps({"models": models_data})


# Define a greeting resource that dynamically constructs a personalized greeting
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Return a greeting for the given name

    Args:
        name: The name to greet

    Returns:
        A personalized greeting message
    """
    import urllib.parse
    # Decode URL-encoded name
    decoded_name = urllib.parse.unquote(name)
    logger.info(f"Generating greeting for {decoded_name}")
    return f"Hello, {decoded_name}!"


@mcp.resource("file://documents/{name}")
def read_document(name: str) -> str:
    """Read a document by name."""
    # This would normally read from disk
    return f"Content of {name}"


# Single-message prompt template.
@mcp.prompt(title="Code Review")
def review_code(code: str) -> str:
    return f"Please review this code:\n\n{code}"


# Multi-message prompt template (user/assistant conversation seed).
@mcp.prompt(title="Debug Assistant")
def debug_error(error: str) -> list[base.Message]:
    return [
        base.UserMessage("I'm seeing this error:"),
        base.UserMessage(error),
        base.AssistantMessage("I'll help debug that. What have you tried so far?"),
    ]


if __name__ == "__main__":
    # Serve /sse (GET, server→client stream) and /messages (POST, client→server).
    mcp.run(transport="sse")
Client 端: sse_client.py
# sse_client.py — MCP client demo over the (deprecated) SSE transport:
# connects to http://localhost:8082/sse and exercises tools, resources and prompts.
import sys
import urllib.parse  # bug fix: was `import urllib`, which does not reliably expose urllib.parse

from mcp.client.sse import sse_client
from mcp.client.session import ClientSession

import asyncio
import logging
import json

from mcp.types import TextContent, TextResourceContents

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def main():
    """Main client function that demonstrates MCP client features."""
    logger.info("Starting clean MCP client")

    try:
        logger.info("Connecting to server...")
        async with sse_client(url="http://localhost:8082/sse") as (reader, writer):
            async with ClientSession(reader, writer) as session:
                logger.info("Initializing session")
                await session.initialize()

                # 1. Call the add tool
                logger.info("Testing calculator tool")
                add_result = await session.call_tool("add", arguments={"a": 5, "b": 7})
                if add_result and add_result.content:
                    text_content = next((content for content in add_result.content
                                         if isinstance(content, TextContent)), None)
                    if text_content:
                        print(f"\n1. Calculator result (5 + 7) = {text_content.text}")

                # 2. Get models resource
                logger.info("Testing models resource")
                models_response = await session.read_resource("models://")
                if models_response and models_response.contents:
                    text_resource = next((content for content in models_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        models = json.loads(text_resource.text)
                        # Renumbered for consistency (originally printed "3.")
                        print("\n2. Available models:")
                        for model in models.get("models", []):
                            print(f"  - {model['name']} ({model['id']}): {model['description']}")

                # 3. Get greeting resource (URL-encode the name for the URI template)
                logger.info("Testing greeting resource")
                name = "MCP Explorer"
                encoded_name = urllib.parse.quote(name)
                greeting_response = await session.read_resource(f"greeting://{encoded_name}")
                if greeting_response and greeting_response.contents:
                    text_resource = next((content for content in greeting_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print(f"\n3. Greeting: {text_resource.text}")

                # 4. Get document resource
                logger.info("Testing document resource")
                document_response = await session.read_resource("file://documents/example.txt")
                if document_response and document_response.contents:
                    text_resource = next((content for content in document_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print("\n4. Document content:")
                        print(f"   {text_resource.text}")

                # 5. Use code review prompt
                logger.info("Testing code review prompt")
                sample_code = "def hello_world():\n    print('Hello, world!')"
                prompt_response = await session.get_prompt("review_code", {"code": sample_code})
                if prompt_response and prompt_response.messages:
                    message = next((msg for msg in prompt_response.messages if msg.content), None)
                    if message and message.content:
                        text_content = next((content for content in [message.content]
                                             if isinstance(content, TextContent)), None)
                        if text_content:
                            print("\n5. Code review prompt:")
                            print(f"   {text_content.text}")

    except Exception:
        logger.exception("An error occurred")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
3. Streamable HTTP 传输
3.1 原理说明
Streamable HTTP 是MCP 协议 2025-03-26 版本引入的 新一代传输方式,作为 SSE 的替代方案。
核心设计:
| 特性 | 说明 |
|---|---|
| 单端点 | 所有通信通过POST /mcp 进行(路径可自定义) |
| 灵活响应 | 服务器可返回 普通 JSON 或 SSE 流 |
| 会话管理 | 通过 Mcp-Session-Id 请求头管理,可选 |
| 有/无状态 | 同时支持有状态(会话绑定)和无状态(每次独立)模式 |
| 服务器推送 | 客户端可通过 GET /mcp 打开 SSE 流接收通知 |
| 会话终止 | 通过 DELETE /mcp 主动结束会话 |
3.2 原理图 (draw.io)
3.3 交互流程图 (draw.io)
3.4 代码示例
Server 端: streamable_http_server.py
# streamable_http_server.py — MCP demo server: two tools, three resources and two
# prompts, served over the Streamable HTTP transport (single /mcp endpoint, port 8081).
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.prompts import base

import logging
import json

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Create the MCP server instance; stateless_http=False keeps session state
# (clients are bound to an Mcp-Session-Id).
mcp = FastMCP(name="streamable-demo", port=8081, stateless_http=False)


# A simple greeting tool (note: this is a tool, not a resource).
@mcp.tool()
def greeting(name: str = "World") -> str:
    """Return a greeting message."""
    return f"Hello, {name}!"


# Calculator tool.
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


@mcp.resource("models://")
def get_models() -> str:
    """Get information about available AI models"""
    logger.info("Retrieving available models")
    models_data = [
        {
            "id": "gpt-4",
            "name": "GPT-4",
            "description": "OpenAI's GPT-4 large language model"
        },
        {
            "id": "llama-3-70b",
            "name": "LLaMA 3 (70B)",
            "description": "Meta's LLaMA 3 with 70 billion parameters"
        },
        {
            "id": "claude-3-sonnet",
            "name": "Claude 3 Sonnet",
            "description": "Anthropic's Claude 3 Sonnet model"
        }
    ]

    return json.dumps({"models": models_data})


# Define a greeting resource that dynamically constructs a personalized greeting
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Return a greeting for the given name

    Args:
        name: The name to greet

    Returns:
        A personalized greeting message
    """
    import urllib.parse
    # Decode URL-encoded name
    decoded_name = urllib.parse.unquote(name)
    logger.info(f"Generating greeting for {decoded_name}")
    return f"Hello, {decoded_name}!"


@mcp.resource("file://documents/{name}")
def read_document(name: str) -> str:
    """Read a document by name."""
    # This would normally read from disk
    return f"Content of {name}"


# Single-message prompt template.
@mcp.prompt(title="Code Review")
def review_code(code: str) -> str:
    return f"Please review this code:\n\n{code}"


# Multi-message prompt template (user/assistant conversation seed).
@mcp.prompt(title="Debug Assistant")
def debug_error(error: str) -> list[base.Message]:
    return [
        base.UserMessage("I'm seeing this error:"),
        base.UserMessage(error),
        base.AssistantMessage("I'll help debug that. What have you tried so far?"),
    ]


if __name__ == "__main__":
    # All traffic goes through the single /mcp endpoint.
    mcp.run(transport="streamable-http")
Client 端: streamable_http_client.py
# client.py - Streamable HTTP Protocol MCP Client
import sys
import urllib.parse
from mcp.client.streamable_http import streamablehttp_client

from mcp.client.session import ClientSession

import asyncio
import logging
import json

from mcp.types import TextContent, TextResourceContents
from mcp.server.fastmcp.prompts import base

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def main():
    """Main client function that demonstrates MCP client features with streamable protocol"""
    logger.info("Starting Streamable MCP Client")

    try:
        # Connect to streamable HTTP server
        logger.info("Connecting to streamable server at http://localhost:8081/mcp...")
        # NOTE(review): the third tuple element is a callable (presumably for
        # retrieving the Mcp-Session-Id); it is unpacked here but never used.
        async with streamablehttp_client("http://localhost:8081/mcp") as (reader, writer, callback):
            async with ClientSession(reader, writer) as session:
                logger.info("Initializing session")
                await session.initialize()

                # 1. Call the greeting tool
                logger.info("Testing greeting tool")
                greeting_result = await session.call_tool("greeting", arguments={"name": "World"})
                if greeting_result and greeting_result.content:
                    text_content = next((content for content in greeting_result.content
                                         if isinstance(content, TextContent)), None)
                    if text_content:
                        print(f"\n1. Greeting: {text_content.text}")

                # 2. Call the add tool
                logger.info("Testing calculator tool")
                add_result = await session.call_tool("add", arguments={"a": 5, "b": 7})
                if add_result and add_result.content:
                    text_content = next((content for content in add_result.content
                                         if isinstance(content, TextContent)), None)
                    if text_content:
                        print(f"\n2. Calculator result (5 + 7) = {text_content.text}")

                # 3. Get models resource
                logger.info("Testing models resource")
                models_response = await session.read_resource("models://")
                if models_response and models_response.contents:
                    text_resource = next((content for content in models_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        models = json.loads(text_resource.text)
                        print("\n3. Available models:")
                        for model in models.get("models", []):
                            print(f"  - {model['name']} ({model['id']}): {model['description']}")

                # 4. Get greeting resource (URL-encode the name for the URI template)
                logger.info("Testing greeting resource")
                name = "MCP Explorer"
                encoded_name = urllib.parse.quote(name)
                greeting_response = await session.read_resource(f"greeting://{encoded_name}")
                if greeting_response and greeting_response.contents:
                    text_resource = next((content for content in greeting_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print(f"\n4. Greeting: {text_resource.text}")

                # 5. Get document resource
                logger.info("Testing document resource")
                document_response = await session.read_resource("file://documents/example.txt")
                if document_response and document_response.contents:
                    text_resource = next((content for content in document_response.contents
                                          if isinstance(content, TextResourceContents)), None)
                    if text_resource:
                        print(f"\n5. Document content:")
                        print(f"   {text_resource.text}")

                # 6. Use code review prompt
                logger.info("Testing code review prompt")
                sample_code = "def hello_world():\n    print('Hello, world!')"
                prompt_response = await session.get_prompt("review_code", {"code": sample_code})
                if prompt_response and prompt_response.messages:
                    message = next((msg for msg in prompt_response.messages if msg.content), None)
                    if message and message.content:
                        text_content = next((content for content in [message.content]
                                             if isinstance(content, TextContent)), None)
                        if text_content:
                            print("\n6. Code review prompt:")
                            print(f"   {text_content.text}")

                # 7. Use debug error prompt (multi-message format)
                logger.info("Testing debug assistant prompt")
                error_message = "AttributeError: 'NoneType' object has no attribute 'method'"
                debug_response = await session.get_prompt("debug_error", {"error": error_message})
                if debug_response and debug_response.messages:
                    print("\n7. Debug assistant prompt (multi-message):")
                    # NOTE(review): session.get_prompt messages are wire-format prompt
                    # messages, not the server-side base.UserMessage/AssistantMessage
                    # classes — these isinstance checks may never match, silently
                    # printing nothing. Confirm against mcp.types.PromptMessage and
                    # consider dispatching on msg.role instead.
                    for idx, msg in enumerate(debug_response.messages):
                        if isinstance(msg, base.UserMessage):
                            print(f"  [User Message {idx + 1}]: {msg.content.text if hasattr(msg.content, 'text') else msg.content}")
                        elif isinstance(msg, base.AssistantMessage):
                            print(f"  [Assistant Message {idx + 1}]: {msg.content.text if hasattr(msg.content, 'text') else msg.content}")

    except Exception:
        logger.exception("An error occurred")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
二、三种传输方式对比
| 特性 | Stdio | SSE | Streamable HTTP |
|---|---|---|---|
| 协议版本 | 所有版本 | 2024-11-05 (已弃用) | 2025-03-26 (推荐) |
| 通信方式 | 进程 stdin/stdout | HTTP GET + POST (双端点) | 统一 HTTP POST (单端点) |
| 连接模式 | 进程绑定 | 持久 SSE 长连接 | 灵活 (可有状态/无状态) |
| 网络支持 | ❌ 仅本地 | ✅ 支持 | ✅ 支持 |
| 端点数量 | N/A | 2 (/sse + /messages) | 1 (/mcp) |
| 流式响应 | ✅ 天然流式 | ✅ SSE 推送 | ✅ 可选 JSON/SSE 流 |
| 多客户端 | ❌ 一对一 | ✅ | ✅ |
| 无状态模式 | ❌ | ❌ | ✅ |
| 会话恢复 | ❌ | ❌ 重连困难 | ✅ 支持 |
| 基础设施兼容 | N/A | ⚠️ SSE 代理兼容问题 | ✅ 标准 HTTP |
| 适合场景 | 本地 IDE / CLI | 旧版网络服务 | 新项目首选 |
《mcp学习笔记(三)-Mcp传输协议代码示例》 是转载文章,点击查看原文。