mirror of
https://gitflic.ru/project/alt-gnome/karkas.git
synced 2025-03-15 06:45:57 +03:00
import requests
import json
import asyncio
import aiohttp

from ...standard.database import *
from ...standard.config.config import *


class YandexGPT:
    token = None
    catalog_id = None
    languages = {
        "ru": "русский язык",
        "en": "английский язык",
        "de": "немецкий язык",
        "uk": "украинский язык",
        "es": "испанский язык",
        "be": "белорусский язык",
    }

    def __init__(self, token, catalog_id):
        self.token = token
        self.catalog_id = catalog_id

    async def async_token_check(self, messages, gpt, max_tokens, del_msg_id=1):
        url = "https://llm.api.cloud.yandex.net/foundationModels/v1/tokenize"
        while True:
            text = ""
            for message in messages:
                text += message["text"]
            try:
                response = requests.post(url, json={"model": gpt, "text": text})
            except Exception as e:  # TODO: rework error handling
                print(e)
                continue
            # The tokenize response body is treated as a plain token count; drop the
            # message at del_msg_id until the prompt fits within the token budget.
            if int(response.text) < max_tokens - 2000:
                break
            else:
                messages.pop(del_msg_id)
        return messages

    @staticmethod
    async def async_request(*, url, headers, prompt) -> dict:
        # Non-blocking aiohttp alternative to requests.post; currently unused by the methods below.
        async with aiohttp.ClientSession() as session:
            async with session.post(url, headers=headers, json=prompt) as response:
                return await response.json()

    async def async_yandexgpt_lite(self, system_prompt, input_messages, stream=False, temperature=0.6, max_tokens=8000):
        url = "https://llm.api.cloud.yandex.net/foundationModels/v1/completion"
        gpt = f"gpt://{self.catalog_id}/yandexgpt-lite/latest"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Api-Key {self.token}"
        }

        messages = [{"role": "system", "text": system_prompt}]
        for message in input_messages:
            messages.append(message)
        messages = await self.async_token_check(messages, gpt, max_tokens)

        prompt = {
            "modelUri": gpt,
            "completionOptions": {
                "stream": stream,
                "temperature": temperature,
                "maxTokens": max_tokens
            },
            "messages": messages
        }

        response = requests.post(url, headers=headers, json=prompt).text
        return json.loads(response)["result"]["alternatives"][0]["message"]["text"]

    async def async_yandexgpt(self, system_prompt, input_messages, stream=False, temperature=0.6, max_tokens=8000):
        url = "https://llm.api.cloud.yandex.net/foundationModels/v1/completion"
        gpt = f"gpt://{self.catalog_id}/yandexgpt/latest"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Api-Key {self.token}"
        }

        messages = [{"role": "system", "text": system_prompt}]
        for message in input_messages:
            messages.append(message)
        messages = await self.async_token_check(messages, gpt, max_tokens)

        prompt = {
            "modelUri": gpt,
            "completionOptions": {
                "stream": stream,
                "temperature": temperature,
                "maxTokens": max_tokens
            },
            "messages": messages
        }

        response = requests.post(url, headers=headers, json=prompt).text
        return json.loads(response)["result"]["alternatives"][0]["message"]["text"]

    async def async_yandexgpt_translate(self, input_language, output_language, text):
        input_language = self.languages[input_language]
        output_language = self.languages[output_language]

        return await self.async_yandexgpt(
            f"Переведи на {output_language} сохранив оригинальный смысл текста. Верни только результат:",
            [{"role": "user", "text": text}],
            stream=False, temperature=0.6, max_tokens=8000
        )

    async def async_yandexgpt_spelling_check(self, input_language, text):
        input_language = self.languages[input_language]

        return await self.async_yandexgpt(
            f"Проверьте орфографию и пунктуацию текста на {input_language}. Верни исправленный текст "
            f"без смысловых искажений:",
            [{"role": "user", "text": text}],
            stream=False, temperature=0.6, max_tokens=8000
        )

    async def async_yandexgpt_text_history(self, messages, stream=False, temperature=0.6, max_tokens=8000):
        url = "https://llm.api.cloud.yandex.net/foundationModels/v1/completion"
        gpt = f"gpt://{self.catalog_id}/summarization/latest"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Api-Key {self.token}"
        }

        messages = await self.async_token_check(messages, gpt, max_tokens, del_msg_id=0)

        prompt = {
            "modelUri": gpt,
            "completionOptions": {
                "stream": stream,
                "temperature": temperature,
                "maxTokens": max_tokens
            },
            "messages": messages
        }

        response = requests.post(url, headers=headers, json=prompt).text
        return json.loads(response)["result"]["alternatives"][0]["message"]["text"]

    async def async_yandex_cloud_text_to_speech(self, text, voice, emotion, speed, format, quality):
        tts = "tts.api.cloud.yandex.net/speech/v1/tts:synthesize"
        # TODO: implement the TTS function
        return 0

    async def async_yandex_cloud_vision(self, image, features, language):
        # TODO: implement the Vision function
        return 0

    async def collect_messages(self, message_id, chat_id):
        messages = []
        # Collect the reply chain in the format:
        # [{"role": "user", "text": "<user_name>: Hello!"},
        #  {"role": "assistant", "text": "Hello!"}]
        while True:
            message = get_message_text(chat_id, message_id)
            if get_message_ai_model(chat_id, message_id) is not None:
                messages.append({"role": "assistant", "text": message})
            else:
                sender_name = get_user_name(get_message_sender_id(chat_id, message_id))
                messages.append({"role": "user", "text": sender_name + ": " + message})
            message_id = get_message_answer_to_message_id(chat_id, message_id)
            if message_id is None:
                break
        # The chain is walked from the newest message backwards, so restore chronological order.
        messages.reverse()
        return messages

    async def collect_messages_for_history(self, start_message_id, end_message_id, chat_id):
        messages = []
        # Collect the message range in the format:
        # [{"role": "user", "text": "<user_name>: Hello!"},
        #  {"role": "assistant", "text": "Hello!"}]
        while True:
            message = get_message_text(chat_id, start_message_id)
            if get_message_ai_model(chat_id, start_message_id) is not None:
                messages.append({"role": "assistant", "text": message})
            else:
                sender_name = get_user_name(get_message_sender_id(chat_id, start_message_id))
                messages.append({"role": "user", "text": sender_name + ": " + message})
            start_message_id -= 1
            if start_message_id <= end_message_id:
                break
        # Messages are gathered from the newest id downwards, so restore chronological order.
        messages.reverse()
        return messages

    async def yandexgpt_request(self, message_id=None, type="yandexgpt-lite", chat_id=None,
                                message_id_end=None):
        if type == "yandexgpt-lite":
            messages = await self.collect_messages(message_id, chat_id)
            return await self.async_yandexgpt_lite(
                system_prompt=get_yandexgpt_prompt(),
                input_messages=messages,
                stream=False, temperature=0.6, max_tokens=8000
            )
        elif type == "yandexgpt":
            messages = await self.collect_messages(message_id, chat_id)
            return await self.async_yandexgpt(
                system_prompt=get_yandexgpt_prompt(),
                input_messages=messages,
                stream=False, temperature=0.6, max_tokens=8000
            )
        elif type == "yandexgpt-translate":
            return await self.async_yandexgpt_translate(
                input_language=get_message_language(chat_id, message_id),
                output_language=get_chat_language(chat_id),
                text=get_message_text(chat_id, message_id)
            )
        elif type == "yandexgpt-spelling-check":
            return await self.async_yandexgpt_spelling_check(
                input_language=get_message_language(chat_id, message_id),
                text=get_message_text(chat_id, message_id)
            )
        elif type == "yandexgpt-text-history":
            messages = await self.collect_messages_for_history(message_id, message_id_end, chat_id)
            return await self.async_yandexgpt_text_history(
                messages=messages,
                stream=False, temperature=0.6, max_tokens=8000
            )
        else:
            return "Ошибка: Неизвестный тип запроса | Error: Unknown request type"
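
# Usage sketch (an illustrative assumption, not part of the original module): the class
# is expected to be constructed with a Yandex Cloud API key and folder (catalog) id, and
# its coroutines awaited from the bot's event loop, e.g.:
#
#     gpt = YandexGPT(token="<api-key>", catalog_id="<folder-id>")
#     reply = await gpt.yandexgpt_request(message_id=123, type="yandexgpt", chat_id=456)
#
# The message_id/chat_id values above are placeholders; real ids come from the database
# helpers imported from ...standard.database.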