
AI Integration

MAID integrates with multiple AI providers to enable intelligent NPCs, procedural content generation, and dynamic storytelling.

Overview

The AI system provides:

  • LLM Provider Abstraction: Unified interface for multiple AI providers
  • Provider Registry: Manage and switch between providers
  • Configuration: Environment-based setup
  • Utilities: Common patterns for AI-powered features

Supported Providers

Provider Model Examples Use Case
Anthropic Claude 3.5 Sonnet, Claude 3 Opus High-quality dialogue, complex reasoning
OpenAI GPT-4, GPT-3.5-turbo General purpose, embeddings
Ollama Llama 2, Mistral Local/private deployment

Configuration

Environment Variables

# Default provider
MAID_AI__DEFAULT_PROVIDER=anthropic

# Anthropic
MAID_AI__ANTHROPIC_API_KEY=sk-ant-...
MAID_AI__ANTHROPIC_MODEL=claude-3-sonnet-20240229

# OpenAI
MAID_AI__OPENAI_API_KEY=sk-...
MAID_AI__OPENAI_MODEL=gpt-4-turbo-preview

# Ollama (local)
MAID_AI__OLLAMA_HOST=http://localhost:11434
MAID_AI__OLLAMA_MODEL=llama2

# General settings
MAID_AI__REQUEST_TIMEOUT=30

Programmatic Configuration

from maid_engine.config import get_settings

settings = get_settings()
settings.ai.default_provider = "anthropic"
settings.ai.anthropic_model = "claude-3-sonnet-20240229"

LLM Provider Registry

Getting the Registry

from maid_engine.ai import get_registry, set_registry

# Get global registry
registry = get_registry()

# Get provider
provider = registry.get()  # Default provider
provider = registry.get("anthropic")  # Specific provider

Registering Providers

from maid_engine.ai import LLMProviderRegistry
from maid_engine.ai.providers.anthropic import AnthropicProvider

registry = LLMProviderRegistry()

# Register Anthropic
registry.register(AnthropicProvider(
    api_key="sk-ant-...",
    default_model="claude-3-sonnet-20240229",
))

# Register OpenAI
from maid_engine.ai.providers.openai import OpenAIProvider
registry.register(OpenAIProvider(
    api_key="sk-...",
    default_model="gpt-4-turbo-preview",
))

# Set default
registry.set_default("anthropic")

Automatic Setup

from maid_engine.ai.registry import create_registry_from_settings

settings = get_settings()
registry = create_registry_from_settings(settings)
set_registry(registry)

Basic Usage

Simple Completion

from maid_engine.ai import Message, get_registry

registry = get_registry()
provider = registry.get()

# Create messages
messages = [
    Message.system("You are a helpful NPC in a fantasy game."),
    Message.user("What's your name?"),
]

# Get completion
result = await provider.complete(messages)
print(result.content)

With Options

from maid_engine.ai import CompletionOptions

options = CompletionOptions(
    max_tokens=200,
    temperature=0.7,
    stop_sequences=["Player:"],
)

result = await provider.complete(messages, options)

Checking Availability

# Check if provider is configured and working
if await provider.is_available():
    result = await provider.complete(messages)
else:
    # Fallback behavior
    result = default_response()

Message Types

Creating Messages

from maid_engine.ai import Message

# System message (sets behavior)
system = Message.system("You are a wise wizard NPC.")

# User message (player input)
user = Message.user("Tell me about magic.")

# Assistant message (previous AI response)
assistant = Message.assistant("Magic flows through all things...")

Message History

messages = [
    Message.system("You are a merchant NPC. Be helpful but shrewd."),
    Message.user("What do you have for sale?"),
    Message.assistant("Ah, a customer! I have fine swords and sturdy shields."),
    Message.user("How much for a sword?"),
]

result = await provider.complete(messages)
# AI responds with context of previous conversation

NPC Dialogue

Basic NPC Response

async def npc_respond(npc: Entity, player_input: str) -> str:
    """Generate an NPC response to player input.

    Builds a one-shot prompt from the NPC's name and personality
    component, then asks the configured provider for a short reply.
    """
    npc_name = npc.get(NameComponent).name
    persona = npc.get(PersonalityComponent)

    # The system prompt pins identity, personality, and response length.
    system_prompt = (
        f"You are {npc_name}, an NPC in a fantasy game. "
        f"Personality: {persona.description}. "
        f"Keep responses brief (1-2 sentences)."
    )
    conversation = [
        Message.system(system_prompt),
        Message.user(player_input),
    ]

    # Short, slightly creative completions suit quick NPC banter.
    opts = CompletionOptions(max_tokens=100, temperature=0.8)
    completion = await provider.complete(conversation, opts)
    return completion.content

Contextual Dialogue

async def contextual_npc_response(
    npc: Entity,
    player: Entity,
    player_input: str,
    conversation_history: list[tuple[str, str]],
) -> str:
    """Generate context-aware NPC dialogue.

    Combines the NPC's identity, mood, location, and relationship with
    the player into a system prompt, replays recent conversation
    history, then appends the new player input.
    """
    npc_name = npc.get(NameComponent).name
    player_name = player.get(NameComponent).name
    location = get_room_name(npc)

    system_prompt = f"""You are {npc_name}, an NPC in {location}.
Your personality: {npc.get(PersonalityComponent).description}
Current mood: {npc.get(MoodComponent).current}
Relationship with {player_name}: {get_relationship(npc, player)}

Keep responses in character, brief (1-3 sentences)."""

    # Replay only the last 5 exchanges so the prompt stays small.
    history_messages = [
        Message.user(text) if speaker == "player" else Message.assistant(text)
        for speaker, text in conversation_history[-5:]
    ]

    messages = [
        Message.system(system_prompt),
        *history_messages,
        Message.user(player_input),
    ]

    completion = await provider.complete(messages)
    return completion.content

Procedural Content Generation

Room Descriptions

async def generate_room_description(room_type: str, theme: str) -> dict:
    """Generate a room description using AI.

    Returns the model's JSON response parsed into a dict with keys
    name / description / exits / items (as requested by the prompt).
    Raises json.JSONDecodeError if the model returns malformed JSON.
    """
    prompt = f"""Generate a room for a MUD game.
Type: {room_type}
Theme: {theme}

Respond in JSON format:
{{
    "name": "Room name",
    "description": "2-3 sentence description",
    "exits": ["possible exits"],
    "items": ["items that might be here"]
}}"""

    # Higher temperature encourages varied room flavor; 500 tokens is
    # ample headroom for the requested JSON payload.
    opts = CompletionOptions(temperature=0.8, max_tokens=500)
    completion = await provider.complete(
        [
            Message.system("You are a creative game content designer."),
            Message.user(prompt),
        ],
        opts,
    )
    return json.loads(completion.content)

Item Generation

async def generate_item(item_type: str, rarity: str) -> dict:
    """Generate a random item.

    Asks the provider for a JSON item record and returns it parsed as a
    dict. Raises json.JSONDecodeError on malformed model output.
    """
    request = f"""Create a {rarity} {item_type} for a fantasy MUD.

Respond in JSON:
{{
    "name": "Item name",
    "description": "Brief description",
    "type": "{item_type}",
    "rarity": "{rarity}",
    "stats": {{"relevant_stat": value}},
    "value": gold_value
}}"""

    # The system prompt steers the model toward balanced item design.
    conversation = [
        Message.system("You create balanced, interesting game items."),
        Message.user(request),
    ]

    completion = await provider.complete(conversation)
    return json.loads(completion.content)

Quest Generation

async def generate_quest(difficulty: str, location: str) -> dict:
    """Generate a procedural quest.

    Requests a structured quest record (name, objective, steps, rewards,
    and quest-giver dialogue) and returns it parsed from JSON. Raises
    json.JSONDecodeError on malformed model output.
    """
    request = f"""Create a {difficulty} difficulty quest set in {location}.

Include:
- Clear objective
- Backstory motivation
- Rewards appropriate for difficulty

Respond in JSON:
{{
    "name": "Quest name",
    "description": "Quest giver's description",
    "objective": "What player must do",
    "steps": ["step 1", "step 2"],
    "rewards": {{"gold": amount, "xp": amount}},
    "dialogue": {{
        "start": "Quest giver's intro",
        "progress": "Mid-quest check-in",
        "complete": "Completion dialogue"
    }}
}}"""

    conversation = [
        Message.system("You are a quest designer for fantasy RPGs."),
        Message.user(request),
    ]

    completion = await provider.complete(conversation)
    return json.loads(completion.content)

CLI Testing

Test AI providers from the command line:

# Test with default provider
maid dev test-ai "Hello, tell me a joke"

# Test specific provider
maid dev test-ai "Hello" --provider openai

Error Handling

Provider Errors

from maid_engine.ai import ProviderError, RateLimitError

async def safe_ai_call(messages: list[Message]) -> str | None:
    """Call AI with error handling.

    Returns the completion text on success, or None when the provider
    fails (rate limit, provider error, or any unexpected exception),
    logging each failure mode at an appropriate level.
    """
    try:
        # Registry lookup stays inside the try: a misconfigured registry
        # should fall through to the generic handler, not propagate.
        provider = get_registry().get()
        completion = await provider.complete(messages)
    except RateLimitError:
        logger.warning("AI rate limit hit, using fallback")
    except ProviderError as e:
        logger.error(f"AI provider error: {e}")
    except Exception as e:
        logger.exception(f"Unexpected AI error: {e}")
    else:
        return completion.content
    return None

Fallback Behavior

async def npc_dialogue_with_fallback(
    npc: Entity,
    player_input: str,
) -> str:
    """NPC dialogue with fallback for AI failures.

    Tries an AI-generated reply first; if that yields nothing, falls
    back to the NPC's scripted responses.
    """
    prompt = [
        Message.system(f"You are {npc.get(NameComponent).name}."),
        Message.user(player_input),
    ]
    reply = await safe_ai_call(prompt)

    # Scripted responses cover the case where every provider failed.
    return reply if reply else get_scripted_response(npc, player_input)

Best Practices

1. Set Appropriate Limits

# Limit token usage
options = CompletionOptions(
    max_tokens=100,  # Short responses
    temperature=0.7,  # Balanced creativity
)

2. Cache Responses

from functools import lru_cache

# BUG FIX: functools.lru_cache does NOT work on async functions -- it
# caches the coroutine object rather than its result, so the first cache
# hit raises "cannot reuse already awaited coroutine". Cache the awaited
# result explicitly instead.
_room_description_cache: dict[tuple[str, str], str] = {}

async def cached_room_description(room_type: str, theme: str) -> str:
    """Return a cached room description, generating it on first request.

    Args:
        room_type: Kind of room to generate (e.g. "tavern").
        theme: Visual/narrative theme for the room.

    Returns:
        The "description" field of the generated room, cached per
        (room_type, theme) pair so repeat requests skip the AI call.
    """
    key = (room_type, theme)
    if key not in _room_description_cache:
        result = await generate_room_description(room_type, theme)
        _room_description_cache[key] = result["description"]
    return _room_description_cache[key]

3. Use Structured Output

# Request JSON for reliable parsing
prompt = """Respond only in valid JSON format:
{"name": "...", "description": "..."}"""

# Validate response
try:
    data = json.loads(result.content)
except json.JSONDecodeError:
    # Handle malformed response
    data = extract_content_fallback(result.content)

4. Provide Clear Context

# Good - specific, constrained
Message.system(
    "You are a gruff blacksmith NPC. "
    "Keep responses under 2 sentences. "
    "You only talk about weapons and armor."
)

# Bad - vague, unlimited
Message.system("You are an NPC.")

5. Handle Provider Unavailability

registry = get_registry()

# Check available providers
available = await registry.get_available()
if "anthropic" in available:
    provider = registry.get("anthropic")
elif "openai" in available:
    provider = registry.get("openai")
else:
    # Use mock or disable AI features
    provider = MockProvider()

Custom Providers

Implement custom providers:

from maid_engine.ai.providers.base import LLMProvider, CompletionResult

class CustomProvider(LLMProvider):
    """Example provider that talks to a bespoke completion API."""

    name = "custom"
    default_model = "custom-model"

    def __init__(self, endpoint: str, api_key: str):
        # Credentials and target endpoint for the custom backend.
        self._endpoint = endpoint
        self._api_key = api_key

    async def complete(
        self,
        messages: list[Message],
        options: CompletionOptions | None = None,
    ) -> CompletionResult:
        """Call custom API."""
        # Delegate the wire-level request, then adapt the raw payload to
        # the engine's CompletionResult shape.
        response = await self._call_api(messages, options)

        token_usage = {
            "prompt_tokens": response["input_tokens"],
            "completion_tokens": response["output_tokens"],
        }
        return CompletionResult(
            content=response["text"],
            model=self.default_model,
            usage=token_usage,
        )

    async def is_available(self) -> bool:
        """Check if provider is available."""
        # Any health-check failure means "not available".
        try:
            await self._health_check()
        except Exception:
            return False
        return True

# Register custom provider
registry.register(CustomProvider(
    endpoint="https://custom-ai.example.com",
    api_key="...",
))

Next Steps