Ollama MCP Server
A powerful bridge between Ollama and the Model Context Protocol (MCP), enabling seamless integration of Ollama's local LLM capabilities into your MCP-powered applications. It lets you manage and run AI models locally with full coverage of the Ollama API.
Model Management
- Pull models from a registry and push them back
- Copy and remove models
- Create custom models from Modelfiles
- List installed models and show model details

Model Execution
- Run models with custom prompts
- Raw mode support for direct responses
- OpenAI-compatible chat completion API

Server Control
- Start and manage the Ollama server
Install dependencies:
pnpm install
Build the server:
pnpm run build
Add the server to your MCP configuration:
macOS: ~/Library/Application Support/Claude/claude_desktop_config.json
Windows: %APPDATA%/Claude/claude_desktop_config.json
{
  "mcpServers": {
    "ollama": {
      "command": "node",
      "args": ["/path/to/ollama-server/build/index.js"],
      "env": {
        "OLLAMA_HOST": "http://127.0.0.1:11434" // Optional: customize Ollama API endpoint
      }
    }
  }
}
// Pull a model
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "pull",
  arguments: {
    name: "llama2"
  }
});

// Run the model
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "run",
  arguments: {
    name: "llama2",
    prompt: "Explain quantum computing in simple terms"
  }
});
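Both run and chat_completion accept an optional timeout in milliseconds (minimum 1000, default 60000), as described in the tool schemas below. A minimal sketch of overriding it for a slower model; the 120000 value is just an illustrative assumption:

// Run with a longer timeout for slow or large models (value in milliseconds)
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "run",
  arguments: {
    name: "llama2",
    prompt: "Explain quantum computing in simple terms",
    timeout: 120000 // assumption: two minutes; the default is 60000
  }
});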
// Chat completion (OpenAI-compatible)
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "chat_completion",
  arguments: {
    model: "llama2",
    messages: [
      {
        role: "system",
        content: "You are a helpful assistant."
      },
      {
        role: "user",
        content: "What is the meaning of life?"
      }
    ],
    temperature: 0.7
  }
});
// Create a custom model from a Modelfile
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "create",
  arguments: {
    name: "custom-model",
    modelfile: "./path/to/Modelfile"
  }
});
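Once created, the custom model can be run like any other model; a brief sketch reusing the run tool with the name from the example above (the prompt is arbitrary):

// Run the newly created custom model
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "run",
  arguments: {
    name: "custom-model",
    prompt: "Introduce yourself"
  }
});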
OLLAMA_HOST: Configure a custom Ollama API endpoint (default: http://127.0.0.1:11434)

Contributions are welcome! Feel free to:
- Report bugs
- Suggest new features
- Submit pull requests
MIT License - feel free to use in your own projects!
Built with ❤️ for the MCP ecosystem
[
  {
    "name": "serve",
    "description": "Start Ollama server",
    "inputSchema": {
      "type": "object",
      "properties": {},
      "additionalProperties": false
    }
  },
  {
    "name": "create",
    "description": "Create a model from a Modelfile",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name for the model" },
        "modelfile": { "type": "string", "description": "Path to Modelfile" }
      },
      "required": ["name", "modelfile"],
      "additionalProperties": false
    }
  },
  {
    "name": "show",
    "description": "Show information for a model",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name of the model" }
      },
      "required": ["name"],
      "additionalProperties": false
    }
  },
  {
    "name": "run",
    "description": "Run a model",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name of the model" },
        "prompt": { "type": "string", "description": "Prompt to send to the model" },
        "timeout": { "type": "number", "minimum": 1000, "description": "Timeout in milliseconds (default: 60000)" }
      },
      "required": ["name", "prompt"],
      "additionalProperties": false
    }
  },
  {
    "name": "pull",
    "description": "Pull a model from a registry",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name of the model to pull" }
      },
      "required": ["name"],
      "additionalProperties": false
    }
  },
  {
    "name": "push",
    "description": "Push a model to a registry",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name of the model to push" }
      },
      "required": ["name"],
      "additionalProperties": false
    }
  },
  {
    "name": "list",
    "description": "List models",
    "inputSchema": {
      "type": "object",
      "properties": {},
      "additionalProperties": false
    }
  },
  {
    "name": "cp",
    "description": "Copy a model",
    "inputSchema": {
      "type": "object",
      "properties": {
        "source": { "type": "string", "description": "Source model name" },
        "destination": { "type": "string", "description": "Destination model name" }
      },
      "required": ["source", "destination"],
      "additionalProperties": false
    }
  },
  {
    "name": "rm",
    "description": "Remove a model",
    "inputSchema": {
      "type": "object",
      "properties": {
        "name": { "type": "string", "description": "Name of the model to remove" }
      },
      "required": ["name"],
      "additionalProperties": false
    }
  },
  {
    "name": "chat_completion",
    "description": "OpenAI-compatible chat completion API",
    "inputSchema": {
      "type": "object",
      "properties": {
        "model": { "type": "string", "description": "Name of the Ollama model to use" },
        "messages": {
          "type": "array",
          "description": "Array of messages in the conversation",
          "items": {
            "type": "object",
            "properties": {
              "role": { "type": "string", "enum": ["system", "user", "assistant"] },
              "content": { "type": "string" }
            },
            "required": ["role", "content"]
          }
        },
        "temperature": { "type": "number", "minimum": 0, "maximum": 2, "description": "Sampling temperature (0-2)" },
        "timeout": { "type": "number", "minimum": 1000, "description": "Timeout in milliseconds (default: 60000)" }
      },
      "required": ["model", "messages"],
      "additionalProperties": false
    }
  }
]
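For completeness, the remaining management tools follow the same call pattern as the examples above. A brief sketch; "llama2-backup" is an arbitrary example name, and the shape of each tool's result depends on the server, so treat return values as opaque:

// List installed models
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "list",
  arguments: {}
});

// Show information for a model
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "show",
  arguments: { name: "llama2" }
});

// Copy a model, then remove the original
await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "cp",
  arguments: { source: "llama2", destination: "llama2-backup" }
});

await mcp.use_mcp_tool({
  server_name: "ollama",
  tool_name: "rm",
  arguments: { name: "llama2" }
});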