MCP Python script creation prompt
LLM: Qwen/Qwen2.5-72B-Instruct-128K

Prompt:
# Build an MCP server that:
- Connects to my company's MySQL database
- Credentials are stored in a .env file or in OS environment variables; check both
- Use logging to log important tasks
- Use pymysql to connect to the database
- Use mcp to expose the database as a resource
- Use mcp to expose tables as resources
- Use mcp to expose table schemas as resources
- Use mcp and pymysql to expose tools for running read-only SQL queries
- Includes 1 prompt for common data analysis tasks
- Save file to ./mcp_local/mcp_mysql_py/server_fk.py
# Example:
```python
from typing import Annotated, Tuple
from urllib.parse import urlparse, urlunparse

import markdownify
import readabilipy.simple_json
from mcp.shared.exceptions import McpError
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import (
    ErrorData,
    GetPromptResult,
    Prompt,
    PromptArgument,
    PromptMessage,
    TextContent,
    Tool,
    INVALID_PARAMS,
    INTERNAL_ERROR,
)
from protego import Protego
from pydantic import BaseModel, Field, AnyUrl

DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)"
DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)"


def extract_content_from_html(html: str) -> str:
    """Extract and convert HTML content to Markdown format.

    Args:
        html: Raw HTML content to process

    Returns:
        Simplified markdown version of the content
    """
    ret = readabilipy.simple_json.simple_json_from_html_string(
        html, use_readability=True
    )
    if not ret["content"]:
        return "<error>Page failed to be simplified from HTML</error>"
    content = markdownify.markdownify(
        ret["content"],
        heading_style=markdownify.ATX,
    )
    return content


def get_robots_txt_url(url: str) -> str:
    """Get the robots.txt URL for a given website URL.

    Args:
        url: Website URL to get robots.txt for

    Returns:
        URL of the robots.txt file
    """
    # Parse the URL into components
    parsed = urlparse(url)

    # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
    robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))

    return robots_url


async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
    """
    Check if the URL can be fetched by the user agent according to the robots.txt file.
    Raises a McpError if not.
    """
    from httpx import AsyncClient, HTTPError

    robot_txt_url = get_robots_txt_url(url)

    async with AsyncClient() as client:
        try:
            response = await client.get(
                robot_txt_url,
                follow_redirects=True,
                headers={"User-Agent": user_agent},
            )
        except HTTPError:
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
            ))
        if response.status_code in (401, 403):
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
            ))
        elif 400 <= response.status_code < 500:
            return
        robot_txt = response.text
    processed_robot_txt = "\n".join(
        line for line in robot_txt.splitlines() if not line.strip().startswith("#")
    )
    robot_parser = Protego.parse(processed_robot_txt)
    if not robot_parser.can_fetch(str(url), user_agent):
        raise McpError(ErrorData(
            code=INTERNAL_ERROR,
            message=f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
            f"<useragent>{user_agent}</useragent>\n"
            f"<url>{url}</url>"
            f"<robots>\n{robot_txt}\n</robots>\n"
            f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
            f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
        ))


async def fetch_url(
    url: str, user_agent: str, force_raw: bool = False
) -> Tuple[str, str]:
    """
    Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.
    """
    from httpx import AsyncClient, HTTPError

    async with AsyncClient() as client:
        try:
            response = await client.get(
                url,
                follow_redirects=True,
                headers={"User-Agent": user_agent},
                timeout=30,
            )
        except HTTPError as e:
            raise McpError(ErrorData(code=INTERNAL_ERROR, message=f"Failed to fetch {url}: {e!r}"))
        if response.status_code >= 400:
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"Failed to fetch {url} - status code {response.status_code}",
            ))

        page_raw = response.text

    content_type = response.headers.get("content-type", "")
    is_page_html = (
        "<html" in page_raw[:100] or "text/html" in content_type or not content_type
    )

    if is_page_html and not force_raw:
        return extract_content_from_html(page_raw), ""

    return (
        page_raw,
        f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n",
    )


class Fetch(BaseModel):
    """Parameters for fetching a URL."""

    url: Annotated[AnyUrl, Field(description="URL to fetch")]
    max_length: Annotated[
        int,
        Field(
            default=5000,
            description="Maximum number of characters to return.",
            gt=0,
            lt=1000000,
        ),
    ]
    start_index: Annotated[
        int,
        Field(
            default=0,
            description="On return output starting at this character index, useful if a previous fetch was truncated and more context is required.",
            ge=0,
        ),
    ]
    raw: Annotated[
        bool,
        Field(
            default=False,
            description="Get the actual HTML content if the requested page, without simplification.",
        ),
    ]


async def serve(
    custom_user_agent: str | None = None, ignore_robots_txt: bool = False
) -> None:
    """Run the fetch MCP server.

    Args:
        custom_user_agent: Optional custom User-Agent string to use for requests
        ignore_robots_txt: Whether to ignore robots.txt restrictions
    """
    server = Server("mcp-fetch")
    user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS
    user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL

    @server.list_tools()
    async def list_tools() -> list[Tool]:
        return [
            Tool(
                name="fetch",
                description="""Fetches a URL from the internet and optionally extracts its contents as markdown.

Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. Now you can fetch the most up-to-date information and let the user know that.""",
                inputSchema=Fetch.model_json_schema(),
            )
        ]

    @server.list_prompts()
    async def list_prompts() -> list[Prompt]:
        return [
            Prompt(
                name="fetch",
                description="Fetch a URL and extract its contents as markdown",
                arguments=[
                    PromptArgument(
                        name="url", description="URL to fetch", required=True
                    )
                ],
            )
        ]

    @server.call_tool()
    async def call_tool(name, arguments: dict) -> list[TextContent]:
        try:
            args = Fetch(**arguments)
        except ValueError as e:
            raise McpError(ErrorData(code=INVALID_PARAMS, message=str(e)))

        url = str(args.url)
        if not url:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))

        if not ignore_robots_txt:
            await check_may_autonomously_fetch_url(url, user_agent_autonomous)

        content, prefix = await fetch_url(
            url, user_agent_autonomous, force_raw=args.raw
        )
        original_length = len(content)
        if args.start_index >= original_length:
            content = "<error>No more content available.</error>"
        else:
            truncated_content = content[args.start_index : args.start_index + args.max_length]
            if not truncated_content:
                content = "<error>No more content available.</error>"
            else:
                content = truncated_content
                actual_content_length = len(truncated_content)
                remaining_content = original_length - (args.start_index + actual_content_length)
                # Only add the prompt to continue fetching if there is still remaining content
                if actual_content_length == args.max_length and remaining_content > 0:
                    next_start = args.start_index + actual_content_length
                    content += f"\n\n<error>Content truncated. Call the fetch tool with a start_index of {next_start} to get more content.</error>"
        return [TextContent(type="text", text=f"{prefix}Contents of {url}:\n{content}")]

    @server.get_prompt()
    async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
        if not arguments or "url" not in arguments:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))

        url = arguments["url"]

        try:
            content, prefix = await fetch_url(url, user_agent_manual)
            # TODO: after SDK bug is addressed, don't catch the exception
        except McpError as e:
            return GetPromptResult(
                description=f"Failed to fetch {url}",
                messages=[
                    PromptMessage(
                        role="user",
                        content=TextContent(type="text", text=str(e)),
                    )
                ],
            )
        return GetPromptResult(
            description=f"Contents of {url}",
            messages=[
                PromptMessage(
                    role="user", content=TextContent(type="text", text=prefix + content)
                )
            ],
        )

    options = server.create_initialization_options()
    async with stdio_server() as (read_stream, write_stream):
        await server.run(read_stream, write_stream, options, raise_exceptions=True)
```
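The prompt above expects database credentials to come from a .env file or from OS environment variables. As a reference for running the generated server below, a .env file could use the variable names the generated code reads; this is only a sketch, and every value shown is a placeholder:

```
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_USER=readonly_user
MYSQL_PASSWORD=change-me
MYSQL_DATABASE=company_db
```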
server_fk.py

```python
import json
import logging
import os
from typing import Any, Dict, List
from urllib.parse import urlparse

import pymysql
from dotenv import load_dotenv
from pydantic import AnyUrl
from mcp.shared.exceptions import McpError
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import (
    ErrorData,
    GetPromptResult,
    Prompt,
    PromptArgument,
    PromptMessage,
    TextContent,
    Tool,
    Resource,
    ResourceTemplate,
    INVALID_PARAMS,
    INTERNAL_ERROR,
)

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load a .env file (if present) into the process environment so that
# credentials can come from either a .env file or OS environment variables.
load_dotenv()


def get_db_credentials() -> Dict[str, Any]:
    """Get MySQL database credentials from a .env file or OS environment variables."""
    credentials = {
        'host': os.getenv('MYSQL_HOST', 'localhost'),
        'user': os.getenv('MYSQL_USER', 'your_username'),
        'password': os.getenv('MYSQL_PASSWORD', 'your_password'),
        'database': os.getenv('MYSQL_DATABASE', 'your_database'),
        'port': int(os.getenv('MYSQL_PORT', '3306')),
    }
    return credentials


async def connect_to_db() -> pymysql.connections.Connection:
    """Connect to the MySQL database using the provided credentials."""
    credentials = get_db_credentials()
    try:
        connection = pymysql.connect(
            host=credentials['host'],
            user=credentials['user'],
            password=credentials['password'],
            database=credentials['database'],
            port=credentials['port'],
            cursorclass=pymysql.cursors.DictCursor
        )
        logging.info(f"Successfully connected to the database: {credentials['database']}")
        return connection
    except pymysql.MySQLError as e:
        logging.error(f"Failed to connect to the database: {e}")
        raise McpError(ErrorData(code=INTERNAL_ERROR, message=f"Failed to connect to the database: {e}"))


async def list_tables(connection: pymysql.connections.Connection) -> List[str]:
    """List all tables in the database."""
    with connection.cursor() as cursor:
        cursor.execute("SHOW TABLES")
        # DictCursor returns each row as a dict with a single column, so take its value.
        tables = [list(row.values())[0] for row in cursor.fetchall()]
        logging.info(f"Listed tables: {tables}")
        return tables


async def get_table_schema(connection: pymysql.connections.Connection, table_name: str) -> List[Dict[str, Any]]:
    """Get the schema of a specific table.

    Note: not yet exposed as an MCP resource; available for a schema resource.
    """
    with connection.cursor() as cursor:
        cursor.execute(f"DESCRIBE `{table_name}`")
        schema = cursor.fetchall()
        logging.info(f"Described table schema for {table_name}")
        return schema


# Statement prefixes accepted by the read-only query guard.
READ_ONLY_PREFIXES = ("select", "show", "describe", "explain", "with")


async def run_read_only_query(connection: pymysql.connections.Connection, query: str) -> List[Dict[str, Any]]:
    """Run a read-only SQL query on the database."""
    # Reject anything that is not an obviously read-only statement.
    if not query.strip().lower().startswith(READ_ONLY_PREFIXES):
        raise McpError(ErrorData(code=INVALID_PARAMS, message="Only read-only queries (SELECT/SHOW/DESCRIBE/EXPLAIN) are allowed"))
    with connection.cursor() as cursor:
        cursor.execute(query)
        results = cursor.fetchall()
        logging.info(f"Query returned {len(results)} rows")
        return results


async def serve() -> None:
    """Run the MCP server for MySQL database access."""
    logging.info("Starting MCP server for MySQL database access.")
    server = Server("mcp-mysql-py")

    @server.list_resources()
    async def list_resources() -> List[Resource]:
        connection = await connect_to_db()
        tables = await list_tables(connection)
        resources = [
            Resource(
                uri=f"mysql://{table_name}",
                name=f"Table {table_name}",
                mimeType="application/json",
                description=f"Data from the {table_name} table in the MySQL database",
            )
            for table_name in tables
        ]
        return resources

    @server.read_resource()
    async def read_resource(uri: AnyUrl) -> str:
        # The table name is the host part of a mysql://{table_name} URI.
        table_name = urlparse(str(uri)).netloc
        connection = await connect_to_db()
        # Only allow table names that actually exist, to avoid SQL injection via the URI.
        if table_name not in await list_tables(connection):
            raise McpError(ErrorData(code=INVALID_PARAMS, message=f"Unknown table: {table_name}"))
        results = await run_read_only_query(connection, f"SELECT * FROM `{table_name}`")
        # Return the rows as JSON text for the resource contents.
        return json.dumps(results, default=str)

    @server.list_resource_templates()
    async def list_resource_templates() -> List[ResourceTemplate]:
        return [
            ResourceTemplate(
                uriTemplate="mysql://{table_name}",
                name="Table {table_name}",
                mimeType="application/json",
                description="Data from a specific table in the MySQL database",
            )
        ]
    # Requests for templated mysql://{table_name} URIs are resolved by the
    # read_resource handler above, so no separate template read handler is needed.

    @server.list_tools()
    async def list_tools() -> List[Tool]:
        return [
            Tool(
                name="run_query",
                description="Run a read-only SQL query on the MySQL database",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "SQL query to run",
                            "minLength": 1
                        }
                    },
                    "required": ["query"]
                }
            )
        ]

    @server.call_tool()
    async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
        if name != "run_query":
            raise McpError(ErrorData(code=INVALID_PARAMS, message="Unknown tool"))
        query = arguments.get("query")
        if not query:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="Query is required"))
        connection = await connect_to_db()
        results = await run_read_only_query(connection, query)
        # TextContent.text must be a string, so serialize the result rows to JSON.
        return [TextContent(type="text", text=json.dumps(results, default=str))]

    @server.list_prompts()
    async def list_prompts() -> List[Prompt]:
        return [
            Prompt(
                name="data_analysis",
                description="Common data analysis tasks using the MySQL database",
                arguments=[
                    PromptArgument(
                        name="query",
                        description="SQL query for data analysis",
                        required=True
                    )
                ]
            )
        ]

    @server.get_prompt()
    async def get_prompt(name: str, arguments: Dict[str, Any] | None) -> GetPromptResult:
        if name != "data_analysis":
            raise McpError(ErrorData(code=INVALID_PARAMS, message="Unknown prompt"))
        query = (arguments or {}).get("query")
        if not query:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="Query is required"))
        connection = await connect_to_db()
        results = await run_read_only_query(connection, query)
        return GetPromptResult(
            description=f"Results of the data analysis query: {query}",
            messages=[
                PromptMessage(
                    role="user",
                    content=TextContent(type="text", text=json.dumps(results, default=str))
                )
            ]
        )

    options = server.create_initialization_options()
    async with stdio_server() as (read_stream, write_stream):
        await server.run(read_stream, write_stream, options, raise_exceptions=True)


if __name__ == "__main__":
    import asyncio
    asyncio.run(serve())
```
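Once credentials are in place, the server can be exercised over stdio with the MCP Python client. This is a minimal sketch, assuming the file lives at ./mcp_local/mcp_mysql_py/server_fk.py as the prompt requests; the table name in the query is a placeholder:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Launch the generated server as a stdio subprocess.
    params = StdioServerParameters(
        command="python",
        args=["./mcp_local/mcp_mysql_py/server_fk.py"],
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # List the table resources the server exposes.
            resources = await session.list_resources()
            print(resources)
            # Run a read-only query via the run_query tool
            # ("orders" is a placeholder table name).
            result = await session.call_tool(
                "run_query", {"query": "SELECT COUNT(*) AS n FROM orders"}
            )
            print(result)


asyncio.run(main())
```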