Add context-aware response generator, demo session, and bug fixes
Features:
- Context-aware response generator for storyteller
- Select multiple characters to include in context
- Generate scene descriptions or individual responses
- Individual responses auto-parsed and sent to each character
- Improved prompt with explicit [CharacterName] format
- Smart context building with character profiles and history
- Demo session auto-creation on startup
- Pre-configured 'The Cursed Tavern' adventure
- Two characters: Bargin (Dwarf Warrior) and Willow (Elf Ranger)
- Quick-access buttons on home page
- Eliminates need to recreate test data
- Session ID copy button for easy sharing

Bug Fixes:
- Fixed character chat history showing only the most recent message
- CharacterView now handles both 'storyteller_response' and 'new_message'
- Fixed all Pydantic deprecation warnings
- Replaced .dict() with .model_dump() (9 instances)
- Fixed WebSocket manager reference in contextual responses

UI Improvements:
- Beautiful demo section with gradient styling
- Format help text for individual responses
- Improved messaging and confirmations

Documentation:
- CONTEXTUAL_RESPONSE_FEATURE.md - Complete feature documentation
- DEMO_SESSION.md - Demo session guide
- FIXES_SUMMARY.md - Bug fix summary
- PROMPT_IMPROVEMENTS.md - Prompt engineering details
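For the individual-response mode described above, the prompt asks the model to emit one block per character in the `[CharacterName] Response text` format, and the backend splits those blocks and delivers each one privately. A minimal sketch of what that parsing step could look like, written in Python since the parsing happens server-side; the function name, regex, and return shape are illustrative assumptions, not the project's actual parser:

```python
import re

def parse_individual_responses(generated_text: str) -> dict[str, str]:
    """Split a reply of the form "[CharacterName] text ..." into one entry
    per character. Illustrative sketch only; the real backend parser may differ."""
    # Capture each bracketed name and everything up to the next bracketed header
    # (or the end of the text).
    pattern = re.compile(r"\[([^\]\n]+)\]\s*(.*?)(?=\n\s*\[[^\]\n]+\]|\Z)", re.S)
    return {name.strip(): body.strip() for name, body in pattern.findall(generated_text)}

# Example:
# parse_individual_responses("[Bargin] The door creaks open.\n[Willow] You hear wolves.")
# -> {"Bargin": "The door creaks open.", "Willow": "You hear wolves."}
```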
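The Pydantic deprecation warnings noted under Bug Fixes come from the v1-style `.dict()` serializer, which Pydantic v2 deprecates in favor of `.model_dump()`. A small before/after sketch of the swap; the model and fields below are invented for illustration and are not taken from this repo:

```python
from pydantic import BaseModel

class CharacterState(BaseModel):
    # Hypothetical model for illustration only.
    name: str
    pending_response: bool = False

state = CharacterState(name="Bargin")

# payload = state.dict()        # Pydantic v1 style; raises a deprecation warning under v2
payload = state.model_dump()    # Pydantic v2 equivalent used by this commit
```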
@@ -12,6 +12,16 @@ function StorytellerView({ sessionId }) {
   const [currentScene, setCurrentScene] = useState('');
   const [isConnected, setIsConnected] = useState(false);
   const [isGeneratingSuggestion, setIsGeneratingSuggestion] = useState(false);
+
+  // Context-aware response state
+  const [selectedCharacterIds, setSelectedCharacterIds] = useState([]);
+  const [contextualResponseType, setContextualResponseType] = useState('scene');
+  const [contextualAdditionalContext, setContextualAdditionalContext] = useState('');
+  const [contextualModel, setContextualModel] = useState('gpt-4o');
+  const [isGeneratingContextual, setIsGeneratingContextual] = useState(false);
+  const [generatedContextualResponse, setGeneratedContextualResponse] = useState('');
+  const [showContextualGenerator, setShowContextualGenerator] = useState(false);
+
   const wsRef = useRef(null);

   useEffect(() => {
@@ -137,6 +147,108 @@ function StorytellerView({ sessionId }) {
     }
   };

+  // Toggle character selection for contextual response
+  const toggleCharacterSelection = (charId) => {
+    setSelectedCharacterIds(prev =>
+      prev.includes(charId)
+        ? prev.filter(id => id !== charId)
+        : [...prev, charId]
+    );
+  };
+
+  // Select all characters with pending messages
+  const selectAllPending = () => {
+    const pendingIds = Object.entries(characters)
+      .filter(([_, char]) => char.pending_response)
+      .map(([id, _]) => id);
+    setSelectedCharacterIds(pendingIds);
+  };
+
+  // Generate contextual response
+  const generateContextualResponse = async () => {
+    if (selectedCharacterIds.length === 0 || isGeneratingContextual) return;
+
+    setIsGeneratingContextual(true);
+    setGeneratedContextualResponse('');
+
+    try {
+      const response = await fetch(
+        `${API_URL}/sessions/${sessionId}/generate_contextual_response`,
+        {
+          method: 'POST',
+          headers: { 'Content-Type': 'application/json' },
+          body: JSON.stringify({
+            character_ids: selectedCharacterIds,
+            response_type: contextualResponseType,
+            model: contextualModel,
+            additional_context: contextualAdditionalContext || null
+          })
+        }
+      );
+
+      if (!response.ok) {
+        throw new Error('Failed to generate contextual response');
+      }
+
+      const data = await response.json();
+
+      // If individual responses were sent, show confirmation
+      if (data.response_type === 'individual' && data.individual_responses_sent) {
+        const sentCount = Object.keys(data.individual_responses_sent).length;
+        const sentNames = Object.keys(data.individual_responses_sent).join(', ');
+
+        if (sentCount > 0) {
+          alert(`✅ Individual responses sent to ${sentCount} character(s): ${sentNames}\n\nThe responses have been delivered privately to each character.`);
+
+          // Clear selections after successful send
+          setSelectedCharacterIds([]);
+          setContextualAdditionalContext('');
+
+          // Update character states to reflect no pending responses
+          setCharacters(prev => {
+            const updated = { ...prev };
+            Object.keys(data.individual_responses_sent).forEach(charName => {
+              const charEntry = Object.entries(updated).find(([_, char]) => char.name === charName);
+              if (charEntry) {
+                const [charId, char] = charEntry;
+                updated[charId] = { ...char, pending_response: false };
+              }
+            });
+            return updated;
+          });
+        }
+
+        // Still show the full generated response for reference
+        setGeneratedContextualResponse(data.response);
+      } else {
+        // Scene description - just show the response
+        setGeneratedContextualResponse(data.response);
+      }
+    } catch (error) {
+      console.error('Error generating contextual response:', error);
+      alert('Failed to generate contextual response. Please try again.');
+    } finally {
+      setIsGeneratingContextual(false);
+    }
+  };
+
+  // Use generated response as scene
+  const useAsScene = () => {
+    if (!generatedContextualResponse) return;
+    setSceneText(generatedContextualResponse);
+    setShowContextualGenerator(false);
+  };
+
+  // Copy session ID to clipboard
+  const copySessionId = () => {
+    navigator.clipboard.writeText(sessionId).then(() => {
+      alert('✅ Session ID copied to clipboard!');
+    }).catch(err => {
+      console.error('Failed to copy:', err);
+      alert('Failed to copy session ID. Please copy it manually.');
+    });
+  };
+
   const selectedChar = selectedCharacter ? characters[selectedCharacter] : null;
   const pendingCount = Object.values(characters).filter(c => c.pending_response).length;

@@ -145,7 +257,14 @@ function StorytellerView({ sessionId }) {
       <div className="storyteller-header">
         <div>
           <h1>🎲 Storyteller Dashboard</h1>
-          <p className="session-id">Session ID: <code>{sessionId}</code></p>
+          <div className="session-id-container">
+            <p className="session-id">
+              Session ID: <code>{sessionId}</code>
+            </p>
+            <button className="btn-copy" onClick={copySessionId} title="Copy Session ID">
+              📋 Copy
+            </button>
+          </div>
           <p className="connection-status">
             <span className={`status-indicator ${isConnected ? 'connected' : 'disconnected'}`}>
               {isConnected ? '● Connected' : '○ Disconnected'}
@@ -197,6 +316,136 @@ function StorytellerView({ sessionId }) {
         )}
       </div>

+      {/* Contextual Response Generator */}
+      <div className="contextual-section">
+        <div className="contextual-header">
+          <h3>🧠 AI Context-Aware Response Generator</h3>
+          <button
+            className="btn-secondary"
+            onClick={() => setShowContextualGenerator(!showContextualGenerator)}
+          >
+            {showContextualGenerator ? '▼ Hide' : '▶ Show'} Generator
+          </button>
+        </div>
+
+        {showContextualGenerator && (
+          <div className="contextual-generator">
+            <p className="contextual-description">
+              Generate a response that takes into account multiple characters' actions and messages.
+              Perfect for creating scenes or responses that incorporate everyone's contributions.
+            </p>
+
+            {/* Character Selection */}
+            <div className="character-selection">
+              <div className="selection-header">
+                <h4>Select Characters to Include:</h4>
+                <button className="btn-small" onClick={selectAllPending} disabled={pendingCount === 0}>
+                  Select All Pending ({pendingCount})
+                </button>
+              </div>
+
+              <div className="character-checkboxes">
+                {Object.entries(characters).map(([id, char]) => (
+                  <label key={id} className={`character-checkbox ${char.pending_response ? 'has-pending' : ''}`}>
+                    <input
+                      type="checkbox"
+                      checked={selectedCharacterIds.includes(id)}
+                      onChange={() => toggleCharacterSelection(id)}
+                    />
+                    <span className="checkbox-label">
+                      {char.name}
+                      {char.pending_response && <span className="pending-badge-small">●</span>}
+                      <span className="message-count">({char.conversation_history?.length || 0} msgs)</span>
+                    </span>
+                  </label>
+                ))}
+              </div>
+
+              {selectedCharacterIds.length > 0 && (
+                <div className="selection-summary">
+                  Selected: {selectedCharacterIds.map(id => characters[id]?.name).join(', ')}
+                </div>
+              )}
+            </div>
+
+            {/* Response Type */}
+            <div className="response-type-selector">
+              <label>
+                <strong>Response Type:</strong>
+                <select
+                  value={contextualResponseType}
+                  onChange={(e) => setContextualResponseType(e.target.value)}
+                >
+                  <option value="scene">Scene Description (broadcast to all)</option>
+                  <option value="individual">Individual Responses (sent privately to each character)</option>
+                </select>
+              </label>
+              {contextualResponseType === 'individual' && (
+                <p className="response-type-help">
+                  💡 The AI will generate responses in this format: <code>[CharacterName] Response text here</code>. Each response is automatically parsed and sent privately to the respective character.
+                </p>
+              )}
+            </div>
+
+            {/* Model Selection */}
+            <div className="model-selector-contextual">
+              <label>
+                <strong>LLM Model:</strong>
+                <select
+                  value={contextualModel}
+                  onChange={(e) => setContextualModel(e.target.value)}
+                >
+                  <option value="gpt-4o">GPT-4o (Latest)</option>
+                  <option value="gpt-4-turbo">GPT-4 Turbo</option>
+                  <option value="gpt-4">GPT-4</option>
+                  <option value="gpt-3.5-turbo">GPT-3.5 Turbo</option>
+                </select>
+              </label>
+            </div>
+
+            {/* Additional Context */}
+            <div className="additional-context">
+              <label>
+                <strong>Additional Context (optional):</strong>
+                <textarea
+                  placeholder="Add any extra information or guidance for the AI (e.g., 'Make it dramatic', 'They should encounter danger', etc.)"
+                  value={contextualAdditionalContext}
+                  onChange={(e) => setContextualAdditionalContext(e.target.value)}
+                  rows="2"
+                />
+              </label>
+            </div>
+
+            {/* Generate Button */}
+            <button
+              className="btn-primary btn-large"
+              onClick={generateContextualResponse}
+              disabled={selectedCharacterIds.length === 0 || isGeneratingContextual || !isConnected}
+            >
+              {isGeneratingContextual ? '⏳ Generating...' : '✨ Generate Context-Aware Response'}
+            </button>
+
+            {/* Generated Response */}
+            {generatedContextualResponse && (
+              <div className="generated-response">
+                <h4>Generated Response:</h4>
+                <div className="response-content">
+                  {generatedContextualResponse}
+                </div>
+                <div className="response-actions">
+                  <button className="btn-primary" onClick={useAsScene}>
+                    Use as Scene
+                  </button>
+                  <button className="btn-secondary" onClick={() => setGeneratedContextualResponse('')}>
+                    Clear
+                  </button>
+                </div>
+              </div>
+            )}
+          </div>
+        )}
+      </div>
+
       <div className="storyteller-content">
         <div className="character-list">
           <h3>Characters ({Object.keys(characters).length})</h3>