import asyncio
import json
import logging
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple

import aiohttp
import openai

from .ai_responder import AIResponder, async_cache_to_file, exponential_backoff, pp
from .igdblib import IGDBQuery
from .leonardo_draw import LeonardoAIDrawMixIn


@async_cache_to_file("openai_chat.dat")
async def openai_chat(client, *args, **kwargs):
    """File-cached wrapper around the OpenAI chat-completions endpoint."""
    return await client.chat.completions.create(*args, **kwargs)


@async_cache_to_file("openai_chat.dat")
async def openai_image(client, *args, **kwargs):
    """Generate an image via the OpenAI images endpoint and download it.

    Returns the downloaded image bytes wrapped in a ``BytesIO``.

    NOTE(review): shares the "openai_chat.dat" cache file with ``openai_chat``
    — confirm this is intentional and that the cache decorator can store a
    ``BytesIO`` payload.
    """
    response = await client.images.generate(*args, **kwargs)
    # The API returns a URL, not the image itself; fetch it in-process.
    async with aiohttp.ClientSession() as session:
        async with session.get(response.data[0].url) as image:
            return BytesIO(await image.read())


class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
    """AI responder backed by the OpenAI API.

    Optionally exposes IGDB game-lookup functions to the model via OpenAI
    tool calling when ``enable-game-info`` is set and IGDB credentials are
    present in the config.
    """

    def __init__(self, config: Dict[str, Any], channel: Optional[str] = None) -> None:
        super().__init__(config, channel)
        # Accept either "openai-token" or the legacy "openai-key" config entry.
        self.client = openai.AsyncOpenAI(
            api_key=self.config.get("openai-token", self.config.get("openai-key", ""))
        )

        # Initialize IGDB only when explicitly enabled AND fully configured.
        self.igdb = None
        if (self.config.get("enable-game-info", False)
                and self.config.get("igdb-client-id")
                and self.config.get("igdb-access-token")):
            try:
                self.igdb = IGDBQuery(
                    self.config["igdb-client-id"],
                    self.config["igdb-access-token"],
                )
                logging.info("IGDB integration enabled for game information")
            except Exception as e:
                # Best-effort feature: a broken IGDB setup must not prevent
                # the responder from starting.
                logging.warning(f"Failed to initialize IGDB: {e}")
                self.igdb = None

    async def draw_openai(self, description: str) -> BytesIO:
        """Render *description* with DALL-E 3, retrying up to three times.

        Returns the image as a ``BytesIO``; raises ``RuntimeError`` when all
        attempts fail.
        """
        for _ in range(3):
            try:
                response = await openai_image(
                    self.client,
                    prompt=description,
                    n=1,
                    size="1024x1024",
                    model="dall-e-3",
                )
                logging.info(f"Drawed a picture with DALL-E on this description: {repr(description)}")
                return response
            except Exception as err:
                logging.warning(f"Failed to generate image {repr(description)}: {repr(err)}")
        raise RuntimeError(f"Failed to generate image {repr(description)} after multiple retries")

    async def chat(self, messages: List[Dict[str, Any]],
                   limit: int) -> Tuple[Optional[Dict[str, Any]], int]:
        """Run one chat completion (with optional IGDB tool calling).

        Returns ``(answer_dict, limit)`` on success or ``(None, limit)`` on
        failure; the (possibly reduced) ``limit`` lets the caller retry with a
        shorter history after a context-length error.

        Raises ``openai.BadRequestError`` for non-context-length request errors.
        """
        # Pick the model: plain-text messages use the default model;
        # multimodal content prefers "model-vision" when configured.
        if isinstance(messages[-1]["content"], str):
            model = self.config["model"]
        elif "model-vision" in self.config:
            model = self.config["model-vision"]
        else:
            # No vision model available: flatten the multimodal content to its
            # text part and fall back to the default model.
            # BUG FIX: `model` was previously left unassigned on this path,
            # raising NameError below.
            messages[-1]["content"] = messages[-1]["content"][0]["text"]
            model = self.config["model"]

        # BUG FIX: retry loop restored — the RateLimitError handler used to
        # sleep and then fall off the end of the function, implicitly
        # returning None (breaking the declared tuple contract) and making the
        # "retry-model" switch dead code.
        while True:
            try:
                chat_kwargs: Dict[str, Any] = {"model": model, "messages": messages}
                # Advertise IGDB lookups as callable tools when enabled.
                if self.igdb and self.config.get("enable-game-info", False):
                    chat_kwargs["tools"] = [
                        {"type": "function", "function": func}
                        for func in self.igdb.get_openai_functions()
                    ]
                    chat_kwargs["tool_choice"] = "auto"

                result = await openai_chat(self.client, **chat_kwargs)
                message = result.choices[0].message

                has_tool_calls = (hasattr(message, 'tool_calls') and message.tool_calls
                                  and self.igdb
                                  and self.config.get("enable-game-info", False))
                if has_tool_calls:
                    try:
                        # Record the assistant's tool-call turn so the model
                        # sees it when we ask for the final answer.
                        messages.append({
                            "role": "assistant",
                            "content": message.content or "",
                            "tool_calls": [tc.dict() if hasattr(tc, 'dict') else tc
                                           for tc in message.tool_calls],
                        })
                        # Execute each requested IGDB function and append its
                        # result as a "tool" message.
                        for tool_call in message.tool_calls:
                            function_name = tool_call.function.name
                            function_args = json.loads(tool_call.function.arguments)
                            function_result = await self._execute_igdb_function(
                                function_name, function_args
                            )
                            messages.append({
                                "role": "tool",
                                "tool_call_id": tool_call.id,
                                "content": (json.dumps(function_result)
                                            if function_result else "No results found"),
                            })
                        # Ask the model again, now with the tool results
                        # (chat_kwargs holds the same `messages` list, so the
                        # appended turns are included).
                        final_result = await openai_chat(self.client, **chat_kwargs)
                        answer_obj = final_result.choices[0].message
                    except Exception as e:
                        # Tool calling is best-effort: fall back to the
                        # original (tool-call) response on any failure.
                        logging.warning(f"Function calling failed, using regular response: {e}")
                        answer_obj = message
                else:
                    answer_obj = message

                answer = {"content": answer_obj.content, "role": answer_obj.role}
                # Success: reset the rate-limit backoff generator.
                self.rate_limit_backoff = exponential_backoff()
                logging.info(f"generated response {result.usage}: {repr(answer)}")
                return answer, limit
            except openai.BadRequestError as err:
                if "maximum context length is" in str(err) and limit > 4:
                    # Ask the caller to retry with a shorter history.
                    logging.warning(f"context length exceeded, reduce the limit {limit}: {str(err)}")
                    limit -= 1
                    return None, limit
                # BUG FIX: bare `raise` preserves the original traceback
                # (was `raise err`).
                raise
            except openai.RateLimitError as err:
                rate_limit_sleep = next(self.rate_limit_backoff)
                # Optionally retry on a cheaper/alternate model.
                if "retry-model" in self.config:
                    model = self.config["retry-model"]
                logging.warning(f"got an rate limit error, sleep for {rate_limit_sleep} seconds: {str(err)}")
                await asyncio.sleep(rate_limit_sleep)
                # loop and retry
            except Exception as err:
                logging.warning(f"failed to generate response: {repr(err)}")
                return None, limit

    async def fix(self, answer: str) -> str:
        """Post-process *answer* with the configured "fix-model".

        Extracts the outermost ``{...}`` span from the model's reply; returns
        the original *answer* unchanged when no fix model is configured, the
        reply contains no usable JSON-like span, or any error occurs.
        """
        if "fix-model" not in self.config:
            return answer
        messages = [{"role": "system", "content": self.config["fix-description"]},
                    {"role": "user", "content": answer}]
        try:
            result = await openai_chat(self.client, model=self.config["fix-model"],
                                       messages=messages)
            logging.info(f"got this message as fix:\n{pp(result.choices[0].message.content)}")
            response = result.choices[0].message.content
            # Keep only the outermost braced span; require a minimal payload
            # (more than just "{}" or "{x}").
            start, end = response.find("{"), response.rfind("}")
            if start == -1 or end == -1 or (start + 3) >= end:
                return answer
            response = response[start : end + 1]
            logging.info(f"fixed answer:\n{pp(response)}")
            return response
        except Exception as err:
            logging.warning(f"failed to execute a fix for the answer: {repr(err)}")
            return answer

    async def translate(self, text: str, language: str = "english") -> str:
        """Translate *text* to *language* using the "fix-model".

        Returns *text* unchanged when no fix model is configured or the
        request fails.
        """
        if "fix-model" not in self.config:
            return text
        message = [
            {
                "role": "system",
                "content": f"You are an professional translator to {language} language,"
                f" you translate everything you get directly to {language}"
                f" if it is not already in {language}, otherwise you just copy it.",
            },
            {"role": "user", "content": text},
        ]
        try:
            result = await openai_chat(self.client, model=self.config["fix-model"],
                                       messages=message)
            response = result.choices[0].message.content
            logging.info(f"got this translated message:\n{pp(response)}")
            return response
        except Exception as err:
            logging.warning(f"failed to translate the text: {repr(err)}")
            return text

    async def memory_rewrite(self, memory: str, message_user: str, answer_user: str,
                             question: str, answer: str) -> str:
        """Rewrite the long-term *memory* to incorporate the latest exchange.

        Uses the configured "memory-model"; returns the old *memory* unchanged
        when no memory model is configured or the request fails.

        NOTE(review): the prompt contains the typo "conversanion" — left as-is
        to preserve runtime strings; consider fixing it deliberately.
        """
        if "memory-model" not in self.config:
            return memory
        messages = [
            {"role": "system",
             "content": self.config.get("memory-system", "You are an memory assistant.")},
            {
                "role": "user",
                "content": f"Here is my previous memory:\n```\n{memory}\n```\n\n"
                f"Here is my conversanion:\n```\n{message_user}: {question}\n\n{answer_user}: {answer}\n```\n\n"
                f"Please rewrite the memory in a way, that it contain the content mentioned in conversation. "
                f"Summarize the memory if required, try to keep important information. "
                f"Write just new memory data without any comments.",
            },
        ]
        logging.info(f"Rewrite memory:\n{pp(messages)}")
        try:
            result = await openai_chat(self.client, model=self.config["memory-model"],
                                       messages=messages)
            new_memory = result.choices[0].message.content
            logging.info(f"new memory:\n{new_memory}")
            return new_memory
        except Exception as err:
            logging.warning(f"failed to create new memory: {repr(err)}")
            return memory

    async def _execute_igdb_function(self, function_name: str,
                                     function_args: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Execute an IGDB tool call requested by the model.

        Supported functions: ``search_games`` (args: ``query``, ``limit``) and
        ``get_game_details`` (args: ``game_id``). Returns a result dict, an
        ``{"error": ...}`` dict for bad input / unknown functions / failures,
        or ``None`` when IGDB is not initialized.
        """
        if not self.igdb:
            return None
        try:
            if function_name == "search_games":
                query = function_args.get("query", "")
                limit = function_args.get("limit", 5)
                if not query:
                    return {"error": "No search query provided"}
                results = self.igdb.search_games(query, limit)
                if results:
                    return {"games": results}
                return {"games": [], "message": f"No games found matching '{query}'"}
            elif function_name == "get_game_details":
                game_id = function_args.get("game_id")
                if not game_id:
                    return {"error": "No game ID provided"}
                result = self.igdb.get_game_details(game_id)
                if result:
                    return {"game": result}
                return {"error": f"Game with ID {game_id} not found"}
            else:
                return {"error": f"Unknown function: {function_name}"}
        except Exception as e:
            logging.error(f"Error executing IGDB function {function_name}: {e}")
            return {"error": f"Failed to execute {function_name}: {str(e)}"}