#!/usr/bin/env python3
"""
Main entry point for the text-based LLM interaction system.
"""

import sys
import json

from interface import TextInterface
from llm_client import LLMClient
from conversation import ConversationManager
from config import Config
from game_config import load_game_config
from game_state import GameState


def main():
    """Main function to run the text-based LLM interaction system."""
    print("Text-Based LLM Interaction System")
    print("Type 'quit' to exit the program")
    print("-" * 40)

    # Initialize components
    config = Config()
    interface = TextInterface()
    conversation_manager = ConversationManager()
    llm_client = LLMClient(config)
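    # Deterministic game state, loaded from the "state" files on disk
    # (the exact layout is assumed to be whatever GameState.from_files expects)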
    gs = GameState.from_files("state")

    # Load game behavior config and seed conversation
    gamecfg = load_game_config()
    if gamecfg.system_prompt:
        conversation_manager.add_system_message(gamecfg.system_prompt)
    if gamecfg.start_message:
        interface.display_response(gamecfg.start_message)

    # Main interaction loop
    while True:
        try:
            # Get user input
            user_input = interface.get_user_input()

            # Check for exit command
            if user_input.lower() in ['quit', 'exit', 'q']:
                print("Goodbye!")
                break

            # Add user message to conversation
            conversation_manager.add_user_message(user_input)

            # Apply deterministic game logic first
            engine_outcome = gs.apply_action(user_input)  # {"events": [...]}

            # Provide a transient system message with canonical facts for narration
            narrator_directive = {
                "ENGINE_OUTCOME": {
                    "events": engine_outcome.get("events", []),
                    "state": gs.to_public_dict(),
                },
                "NARRATION_RULES": [
                    "Narrate strictly according to ENGINE_OUTCOME. Do not invent state.",
                    "Do not add items, unlock objects, or change inventory; the engine already did that.",
                    "Use 2-5 sentences, present tense, second person. Be concise and vivid.",
                    "If the action was impossible, explain why using the facts provided.",
                ],
            }
            transient_system = {
                "role": "system",
                "content": "ENGINE CONTEXT (JSON): " + json.dumps(narrator_directive),
            }

            # Get response from LLM with engine context
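            # Note: the engine context is appended to this turn's request only;
            # it is never stored in the conversation history, so it does not
            # accumulate across turns.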
            messages = list(conversation_manager.get_history()) + [transient_system]
            response = llm_client.get_response(messages)

            # Display response
            interface.display_response(response)

            # Add assistant message to conversation
            conversation_manager.add_assistant_message(response)

            # End scenario if completed
            if gs.completed:
                interface.display_system_message("Scenario complete. You unlocked the door and escaped.")
                break

        except KeyboardInterrupt:
            print("\nGoodbye!")
            break
        except Exception as e:
            print(f"An error occurred: {e}")
            break


if __name__ == "__main__":
    main()