Remove deprecated OpenAI parameters for GPT-5 compatibility

- Remove temperature, max_tokens, top_p, presence_penalty, frequency_penalty parameters
- These parameters are no longer supported in GPT-5 models
- Update all openai_chat calls to use only model and messages parameters
- Accept either the openai-token or the openai-key configuration key

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
OK authored 2025-08-08 19:18:06 +02:00
parent fbec05dfe9
commit be8298f015
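
In practice the change is mechanical: GPT-5-family models reject the classic sampling parameters, so every `openai_chat` call site below loses them and sends only `model` and `messages`. The commit deletes the arguments at each call site; a central compatibility shim would achieve the same. A minimal sketch, assuming the helper ultimately forwards to `client.chat.completions.create` (the `openai_chat_compat` name is illustrative, not from this repository):

```python
from typing import Any

import openai

# Sampling knobs this commit removes; GPT-5-era models reject them.
DEPRECATED_PARAMS = ("temperature", "max_tokens", "top_p",
                     "presence_penalty", "frequency_penalty")

async def openai_chat_compat(client: openai.AsyncOpenAI, **kwargs: Any):
    """Drop parameters newer models no longer accept, then call the chat API."""
    for param in DEPRECATED_PARAMS:
        kwargs.pop(param, None)  # discard silently; model and messages pass through
    return await client.chat.completions.create(**kwargs)
```

Either approach works; deleting at the call sites, as this commit does, keeps the helper's signature honest about what the models accept.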


@@ -26,7 +26,7 @@ async def openai_image(client, *args, **kwargs):
 class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
     def __init__(self, config: Dict[str, Any], channel: Optional[str] = None) -> None:
         super().__init__(config, channel)
-        self.client = openai.AsyncOpenAI(api_key=self.config["openai-token"])
+        self.client = openai.AsyncOpenAI(api_key=self.config.get("openai-token", self.config.get("openai-key", "")))
 
     async def draw_openai(self, description: str) -> BytesIO:
         for _ in range(3):
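
The `api_key` change in this hunk makes the responder accept either spelling of the credential key. The same fallback as a standalone helper (key names and the empty-string default are taken directly from the new line; the `resolve_api_key` name is illustrative):

```python
from typing import Any, Dict

def resolve_api_key(config: Dict[str, Any]) -> str:
    """Prefer "openai-token"; fall back to the legacy "openai-key"."""
    return config.get("openai-token", config.get("openai-key", ""))

# Either key works; "openai-token" wins when both are present.
assert resolve_api_key({"openai-key": "sk-old"}) == "sk-old"
assert resolve_api_key({"openai-token": "sk-new", "openai-key": "sk-old"}) == "sk-new"
```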
@@ -50,11 +50,6 @@ class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
                 self.client,
                 model=model,
                 messages=messages,
-                temperature=self.config["temperature"],
-                max_tokens=self.config["max-tokens"],
-                top_p=self.config["top-p"],
-                presence_penalty=self.config["presence-penalty"],
-                frequency_penalty=self.config["frequency-penalty"],
             )
             answer_obj = result.choices[0].message
             answer = {"content": answer_obj.content, "role": answer_obj.role}
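
`openai_chat` itself is defined above these hunks and not shown; the call sites only reveal that it takes the client plus chat-completion keyword arguments. A hypothetical reconstruction, assuming a plain retry wrapper over the standard AsyncOpenAI API (the retry policy is invented for illustration):

```python
import asyncio
from typing import Optional

import openai

async def openai_chat(client: openai.AsyncOpenAI, **kwargs):
    """Hypothetical reconstruction: retry transient API errors, then give up."""
    last_exc: Optional[Exception] = None
    for attempt in range(3):
        try:
            return await client.chat.completions.create(**kwargs)
        except openai.APIError as exc:
            last_exc = exc
            await asyncio.sleep(2 ** attempt)  # back off 1s, 2s, 4s
    raise last_exc  # all three attempts failed
```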
@@ -82,7 +77,7 @@ class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
             return answer
         messages = [{"role": "system", "content": self.config["fix-description"]}, {"role": "user", "content": answer}]
         try:
-            result = await openai_chat(self.client, model=self.config["fix-model"], messages=messages, temperature=0.2, max_tokens=2048)
+            result = await openai_chat(self.client, model=self.config["fix-model"], messages=messages)
             logging.info(f"got this message as fix:\n{pp(result.choices[0].message.content)}")
             response = result.choices[0].message.content
             start, end = response.find("{"), response.rfind("}")
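
The `find("{")`/`rfind("}")` pair in this hunk slices the outermost brace span out of the model's reply before parsing, which tolerates prose around the JSON. The same pattern as a standalone helper (a sketch, not the file's actual continuation):

```python
import json
from typing import Any, Dict, Optional

def extract_json_block(response: str) -> Optional[Dict[str, Any]]:
    """Parse the outermost {...} span from a reply, ignoring surrounding prose."""
    start, end = response.find("{"), response.rfind("}")
    if start == -1 or end <= start:
        return None  # no brace-delimited object in the reply
    try:
        return json.loads(response[start:end + 1])
    except json.JSONDecodeError:
        return None  # braces present but the span was not valid JSON
```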
@@ -108,7 +103,7 @@ class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
             {"role": "user", "content": text},
         ]
         try:
-            result = await openai_chat(self.client, model=self.config["fix-model"], messages=message, temperature=0.2, max_tokens=2048)
+            result = await openai_chat(self.client, model=self.config["fix-model"], messages=message)
             response = result.choices[0].message.content
             logging.info(f"got this translated message:\n{pp(response)}")
             return response
@@ -133,7 +128,7 @@ class OpenAIResponder(AIResponder, LeonardoAIDrawMixIn):
         logging.info(f"Rewrite memory:\n{pp(messages)}")
         try:
             # logging.info(f'send this memory request:\n{pp(messages)}')
-            result = await openai_chat(self.client, model=self.config["memory-model"], messages=messages, temperature=0.6, max_tokens=4096)
+            result = await openai_chat(self.client, model=self.config["memory-model"], messages=messages)
             new_memory = result.choices[0].message.content
             logging.info(f"new memory:\n{new_memory}")
             return new_memory