Initial commit.
Basic Docker deployment with local LLM integration and simple game state.
llm_client.py (new file)
#!/usr/bin/env python3
"""
LLM client for communicating with LM Studio.
"""


import requests
import json


class LLMClient:
    """Client for communicating with LM Studio."""

    def __init__(self, config):
        """Initialize the LLM client.

        Args:
            config (Config): Configuration object
        """
        self.config = config
        self.session = requests.Session()
        # requests.Session has no session-wide timeout setting, so store the
        # value here and pass it explicitly with each request.
        self.timeout = self.config.REQUEST_TIMEOUT

    def get_response(self, messages):
        """Get a response from the LLM.

        Args:
            messages (list): List of message dictionaries

        Returns:
            str: The LLM response text
        """
        try:
            # Prepare the request payload
            payload = {
                "model": self.config.DEFAULT_MODEL,
                "messages": messages,
                "temperature": self.config.TEMPERATURE,
                "max_tokens": self.config.MAX_TOKENS
            }

            # Send request to LM Studio
            response = self.session.post(
                self.config.get_chat_completions_url(),
                headers={"Content-Type": "application/json"},
                data=json.dumps(payload),
                timeout=self.timeout
            )

            # Raise an exception for bad status codes
            response.raise_for_status()

            # Parse the response
            response_data = response.json()

            # Extract the assistant's message
            assistant_message = response_data["choices"][0]["message"]["content"]

            return assistant_message

        except json.JSONDecodeError as e:
            # Checked before RequestException: newer requests raise a
            # JSONDecodeError that also subclasses RequestException.
            raise Exception(f"Error decoding JSON response: {e}")
        except requests.exceptions.RequestException as e:
            raise Exception(f"Error communicating with LM Studio: {e}")
        except (KeyError, IndexError) as e:
            raise Exception(f"Error parsing LM Studio response: {e}")

    def test_connection(self):
        """Test the connection to LM Studio.

        Returns:
            bool: True if connection successful, False otherwise
        """
        try:
            # Try to get available models (simple connection test)
            response = self.session.get(
                f"{self.config.get_api_url()}/models",
                timeout=self.timeout
            )
            response.raise_for_status()
            return True
        except requests.exceptions.RequestException:
            return False

    def update_model(self, model_name):
        """Update the model used for completions.

        Args:
            model_name (str): Name of the model to use
        """
        self.config.DEFAULT_MODEL = model_name
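
The Config object this client depends on is not part of this diff, so its exact shape is unknown. Below is a hypothetical minimal sketch of it, plus a small usage example; only the attribute and method names are taken from the client code above, while the class body, the values, and the localhost:1234 base URL (LM Studio's default local server address) are assumptions.

# config.py -- hypothetical minimal Config, for illustration only.
class Config:
    # LM Studio's local server listens on http://localhost:1234 by default;
    # adjust if the server runs elsewhere (e.g. reached from inside Docker).
    BASE_URL = "http://localhost:1234"
    DEFAULT_MODEL = "local-model"  # placeholder model identifier
    TEMPERATURE = 0.7
    MAX_TOKENS = 512
    REQUEST_TIMEOUT = 60  # seconds

    def get_api_url(self):
        # LM Studio exposes an OpenAI-compatible API under /v1
        return f"{self.BASE_URL}/v1"

    def get_chat_completions_url(self):
        return f"{self.get_api_url()}/chat/completions"


if __name__ == "__main__":
    from llm_client import LLMClient

    client = LLMClient(Config())
    if client.test_connection():
        print(client.get_response(
            [{"role": "user", "content": "Say hello in one sentence."}]
        ))
    else:
        print("LM Studio is not reachable")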