Add extensive tests and fix critical bugs

- Add extensive tests for igdblib.py (0% -> 100% coverage expected)
- Add tests for leonardo_draw.py AI image generation
- Add tests for openai_responder.py with GPT integration
- Add tests for discord_bot.py bot functionality
- Add extended tests for ai_responder.py edge cases
- Fix critical bugs in igdblib.py:
  * Fix platforms() method treating name as string instead of list
  * Fix game_info() method missing endpoint parameter
  * Add safe dictionary access with .get() methods

Coverage improvements target areas with lowest coverage to maximize impact.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
350 lines
14 KiB
Python
350 lines
14 KiB
Python
import unittest
|
|
from io import BytesIO
|
|
from unittest.mock import AsyncMock, Mock, patch
|
|
|
|
import openai
|
|
|
|
from fjerkroa_bot.openai_responder import OpenAIResponder, openai_chat, openai_image
|
|
|
|
|
|
class TestOpenAIResponder(unittest.IsolatedAsyncioTestCase):
    """Unit tests for the OpenAIResponder wrapper around the OpenAI client."""

    @staticmethod
    def _chat_response(content, role=None, with_usage=False):
        """Build a mock chat-completion response carrying a single choice.

        role/usage are only set when requested; Mock() auto-creates any
        attribute that is read without being set, so omitting them matches
        a hand-built Mock with the same explicit assignments.
        """
        response = Mock()
        response.choices = [Mock()]
        response.choices[0].message.content = content
        if role is not None:
            response.choices[0].message.role = role
        if with_usage:
            response.usage = Mock()
        return response

    def setUp(self):
        # A full configuration exercising every optional model setting.
        self.config = {
            "openai-key": "test_key",
            "model": "gpt-4",
            "model-vision": "gpt-4-vision",
            "retry-model": "gpt-3.5-turbo",
            "fix-model": "gpt-4",
            "fix-description": "Fix JSON documents",
            "memory-model": "gpt-4",
            "memory-system": "You are a memory assistant",
        }
        self.responder = OpenAIResponder(self.config)

    def test_init(self):
        """A responder built from a full config exposes a client and the config."""
        self.assertIsNotNone(self.responder.client)
        self.assertEqual(self.responder.config, self.config)

    def test_init_with_openai_token(self):
        """The legacy 'openai-token' key is accepted in place of 'openai-key'."""
        responder = OpenAIResponder({"openai-token": "test_token", "model": "gpt-4"})
        self.assertIsNotNone(responder.client)

    @patch("fjerkroa_bot.openai_responder.openai_image")
    async def test_draw_openai_success(self, image_mock):
        """A successful generation returns the image bytes unchanged."""
        fake_image = BytesIO(b"fake_image_data")
        image_mock.return_value = fake_image

        result = await self.responder.draw_openai("A beautiful landscape")

        self.assertEqual(result, fake_image)
        image_mock.assert_called_once_with(
            self.responder.client,
            prompt="A beautiful landscape",
            n=1,
            size="1024x1024",
            model="dall-e-3",
        )

    @patch("fjerkroa_bot.openai_responder.openai_image")
    async def test_draw_openai_retry_on_failure(self, image_mock):
        """Transient failures are retried until a generation succeeds."""
        image_mock.side_effect = [
            Exception("First failure"),
            Exception("Second failure"),
            BytesIO(b"success_data"),
        ]

        result = await self.responder.draw_openai("test description")

        self.assertEqual(image_mock.call_count, 3)
        self.assertEqual(result.read(), b"success_data")

    @patch("fjerkroa_bot.openai_responder.openai_image")
    async def test_draw_openai_max_retries_exceeded(self, image_mock):
        """A RuntimeError is raised once every retry attempt has failed."""
        image_mock.side_effect = Exception("Persistent failure")

        with self.assertRaises(RuntimeError) as context:
            await self.responder.draw_openai("test description")

        self.assertEqual(image_mock.call_count, 3)
        self.assertIn("Failed to generate image", str(context.exception))

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_with_string_content(self, chat_mock):
        """Plain string message content is routed to the default text model."""
        chat_mock.return_value = self._chat_response(
            "Hello!", role="assistant", with_usage=True
        )

        messages = [{"role": "user", "content": "Hi there"}]
        answer, limit = await self.responder.chat(messages, 10)

        self.assertEqual(answer, {"content": "Hello!", "role": "assistant"})
        self.assertEqual(limit, 10)
        chat_mock.assert_called_once_with(
            self.responder.client, model="gpt-4", messages=messages
        )

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_with_vision_model(self, chat_mock):
        """Non-string (multimodal) content is routed to the vision model."""
        chat_mock.return_value = self._chat_response(
            "I see an image", role="assistant", with_usage=True
        )

        messages = [{"role": "user", "content": [{"type": "image", "data": "base64data"}]}]
        await self.responder.chat(messages, 10)

        chat_mock.assert_called_once_with(
            self.responder.client, model="gpt-4-vision", messages=messages
        )

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_content_fallback(self, chat_mock):
        """Without a vision model, list content collapses to its text part."""
        responder = OpenAIResponder({"openai-key": "test", "model": "gpt-4"})
        chat_mock.return_value = self._chat_response(
            "Text response", role="assistant", with_usage=True
        )

        messages = [{"role": "user", "content": [{"text": "Hello", "type": "text"}]}]
        await responder.chat(messages, 10)

        # The message content should have been flattened to just the text.
        chat_mock.assert_called_once_with(
            responder.client,
            model="gpt-4",
            messages=[{"role": "user", "content": "Hello"}],
        )

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_bad_request_error(self, chat_mock):
        """A context-length BadRequestError yields no answer and a reduced limit."""
        chat_mock.side_effect = openai.BadRequestError(
            "maximum context length is exceeded", response=Mock(), body=None
        )

        answer, limit = await self.responder.chat(
            [{"role": "user", "content": "test"}], 10
        )

        self.assertIsNone(answer)
        self.assertEqual(limit, 9)  # Should decrease limit

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_bad_request_error_reraise(self, chat_mock):
        """BadRequestErrors unrelated to context length propagate to the caller."""
        chat_mock.side_effect = openai.BadRequestError(
            "Invalid model", response=Mock(), body=None
        )

        with self.assertRaises(openai.BadRequestError):
            await self.responder.chat([{"role": "user", "content": "test"}], 10)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_rate_limit_error(self, chat_mock):
        """A RateLimitError backs off via asyncio.sleep without losing the limit."""
        chat_mock.side_effect = openai.RateLimitError(
            "Rate limit exceeded", response=Mock(), body=None
        )

        with patch("asyncio.sleep") as sleep_mock:
            answer, limit = await self.responder.chat(
                [{"role": "user", "content": "test"}], 10
            )

        self.assertIsNone(answer)
        self.assertEqual(limit, 10)
        sleep_mock.assert_called_once()

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_rate_limit_with_retry_model(self, chat_mock):
        """Rate limiting with a configured retry model still yields no answer."""
        chat_mock.side_effect = openai.RateLimitError(
            "Rate limit exceeded", response=Mock(), body=None
        )

        with patch("asyncio.sleep"):
            answer, limit = await self.responder.chat(
                [{"role": "user", "content": "test"}], 10
            )

        # NOTE(review): presumably the responder switches to retry-model
        # internally here; only the None answer is observable from outside.
        self.assertIsNone(answer)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_chat_generic_exception(self, chat_mock):
        """Unexpected exceptions are swallowed, returning None and the same limit."""
        chat_mock.side_effect = Exception("Network error")

        answer, limit = await self.responder.chat(
            [{"role": "user", "content": "test"}], 10
        )

        self.assertIsNone(answer)
        self.assertEqual(limit, 10)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_fix_success(self, chat_mock):
        """fix() returns the model's repaired JSON when it parses."""
        chat_mock.return_value = self._chat_response(
            '{"answer": "fixed", "valid": true}'
        )

        result = await self.responder.fix('{"answer": "broken"')

        self.assertEqual(result, '{"answer": "fixed", "valid": true}')

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_fix_invalid_json_response(self, chat_mock):
        """fix() falls back to the original answer when the reply is not JSON."""
        chat_mock.return_value = self._chat_response('This is not JSON')

        original_answer = '{"answer": "test"}'
        self.assertEqual(await self.responder.fix(original_answer), original_answer)

    async def test_fix_no_fix_model(self):
        """fix() is a no-op when no fix-model is configured."""
        responder = OpenAIResponder({"openai-key": "test", "model": "gpt-4"})

        original_answer = '{"answer": "test"}'
        self.assertEqual(await responder.fix(original_answer), original_answer)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_fix_exception_handling(self, chat_mock):
        """fix() returns the original answer when the API call fails."""
        chat_mock.side_effect = Exception("API Error")

        original_answer = '{"answer": "test"}'
        self.assertEqual(await self.responder.fix(original_answer), original_answer)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_translate_success(self, chat_mock):
        """translate() returns the model's translation."""
        chat_mock.return_value = self._chat_response("Hola mundo")

        result = await self.responder.translate("Hello world", "spanish")

        self.assertEqual(result, "Hola mundo")
        chat_mock.assert_called_once()

    async def test_translate_no_fix_model(self):
        """translate() is a no-op when no fix-model is configured."""
        responder = OpenAIResponder({"openai-key": "test", "model": "gpt-4"})

        original_text = "Hello world"
        self.assertEqual(await responder.translate(original_text), original_text)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_translate_exception_handling(self, chat_mock):
        """translate() returns the original text when the API call fails."""
        chat_mock.side_effect = Exception("API Error")

        original_text = "Hello world"
        self.assertEqual(await self.responder.translate(original_text), original_text)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_memory_rewrite_success(self, chat_mock):
        """memory_rewrite() returns the model's rewritten memory."""
        chat_mock.return_value = self._chat_response("Updated memory content")

        result = await self.responder.memory_rewrite(
            "Old memory", "user1", "assistant", "What's your name?", "I'm Claude"
        )

        self.assertEqual(result, "Updated memory content")

    async def test_memory_rewrite_no_memory_model(self):
        """memory_rewrite() is a no-op when no memory-model is configured."""
        responder = OpenAIResponder({"openai-key": "test", "model": "gpt-4"})

        original_memory = "Old memory"
        result = await responder.memory_rewrite(
            original_memory, "user1", "assistant", "question", "answer"
        )

        self.assertEqual(result, original_memory)

    @patch("fjerkroa_bot.openai_responder.openai_chat")
    async def test_memory_rewrite_exception_handling(self, chat_mock):
        """memory_rewrite() returns the original memory when the API call fails."""
        chat_mock.side_effect = Exception("API Error")

        original_memory = "Old memory"
        result = await self.responder.memory_rewrite(
            original_memory, "user1", "assistant", "question", "answer"
        )

        self.assertEqual(result, original_memory)
|
|
class TestOpenAIFunctions(unittest.IsolatedAsyncioTestCase):
    """Test the standalone openai functions.

    The original tests patched ``fjerkroa_bot.openai_responder.async_cache_to_file``
    and built mock clients/responses that were never used: patching a decorator
    AFTER the module has been imported cannot affect functions that were already
    decorated at import time, so that scaffolding was dead code. Only the
    callable checks below were ever effective, and they are kept as-is.
    """

    async def test_openai_chat_function(self):
        """openai_chat is exported as a callable (cache-wrapped) function."""
        self.assertTrue(callable(openai_chat))

    async def test_openai_image_function(self):
        """openai_image is exported as a callable (cache-wrapped) function."""
        self.assertTrue(callable(openai_image))
|
|
|
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()