From 9b167c8c05b747b9ba64bbe440a339633377f828 Mon Sep 17 00:00:00 2001
From: Gabo
Date: Tue, 1 Oct 2024 12:01:02 +0200
Subject: [PATCH] Fix logger

---
 Makefile                     |  7 +++++--
 dev-requirements.txt         |  4 +++-
 requirements.txt             |  4 +---
 src/run_it.py                | 24 +++++++++++-------------
 src/start_queue_processor.py |  7 ++++---
 src/translate.py             |  3 ++-
 6 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/Makefile b/Makefile
index 0aac3c5..366db08 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 install:
-	. .venv/bin/activate; pip install -Ur requirements.txt
+	. .venv/bin/activate; pip install -Ur dev-requirements.txt
 
 activate:
 	. .venv/bin/activate
@@ -7,7 +7,7 @@ activate:
 install_venv:
 	python3 -m venv .venv
 	. .venv/bin/activate; python -m pip install --upgrade pip
-	. .venv/bin/activate; python -m pip install -r requirements.txt
+	. .venv/bin/activate; python -m pip install -r dev-requirements.txt
 
 formatter:
 	. .venv/bin/activate; command black --line-length 125 .
@@ -55,3 +55,6 @@ free_up_space:
 	sudo rm -rf /opt/hostedtoolcache/CodeQL
 	sudo docker image prune --all --force
 	df -h
+
+run_it:
+	. .venv/bin/activate; command python src/run_it.py
\ No newline at end of file
diff --git a/dev-requirements.txt b/dev-requirements.txt
index e2a060b..555b628 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -10,4 +10,6 @@ torchvision
 torchaudio
 tiktoken
 accelerate
-pip-upgrader==1.4.15
\ No newline at end of file
+pip-upgrader==1.4.15
+pytest==8.2.2
+black==24.4.2
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index a3a1857..f7696be 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,6 @@
 pydantic==2.8.2
 ollama==0.2.1
 graypy==2.1.0
-pytest==8.2.2
-black==24.4.2
 sentry_sdk==1.44.0
-git+https://github.com/huridocs/ml-cloud-connector.git@e6751aa2f519b1f4344a4e81a51b9c97fbbc6c7b
+git+https://github.com/huridocs/ml-cloud-connector.git@85e0ce5aad8d7428d15343188975b29e8f1d904e
 git+https://github.com/huridocs/queue-processor@bab1f4419b0768df518d06795afd5df2ba0e331c
diff --git a/src/run_it.py b/src/run_it.py
index d95157d..308505f 100644
--- a/src/run_it.py
+++ b/src/run_it.py
@@ -1,20 +1,18 @@
 from time import time
-from ml_cloud_connector.MlCloudConnector import MlCloudConnector
-from data_model.TranslationTask import TranslationTask
-from translate import get_translation
+from data_model.TranslationResponseMessage import TranslationResponseMessage
+from data_model.TranslationTaskMessage import TranslationTaskMessage
+from start_queue_processor import process
 
 
 if __name__ == "__main__":
     start = time()
     print("start")
-    text = (
-        "While there exists a rich body of work on video prediction using generative models, "
-        "the design of methods for evaluating the quality of the videos has received much less attention."
-    )
-    language_from = "English"
-    language_to = "French"
-    translation_task = TranslationTask(text=text, language_from=language_from, language_to=language_to)
-    connector = MlCloudConnector("translation")
-    translation, finished, error = connector.execute(get_translation, connector.service_logger, translation_task)
-    print(translation)
+    text = "While there exists a rich body of work on video prediction using generative models, "
+    text += "the design of methods for evaluating the quality of the videos has received much less attention."
+
+    translation_task_message = TranslationTaskMessage(key="key", text=text, language_from="English", languages_to=["French"])
+
+    results = process(translation_task_message.model_dump())
+    translation_response_message = TranslationResponseMessage(**results)
+    print(translation_response_message.model_dump())
     print("time", round(time() - start, 2), "s")
diff --git a/src/start_queue_processor.py b/src/start_queue_processor.py
index 62c9418..aa72b21 100644
--- a/src/start_queue_processor.py
+++ b/src/start_queue_processor.py
@@ -1,6 +1,7 @@
 import os
 
 from ml_cloud_connector.MlCloudConnector import MlCloudConnector
+from ml_cloud_connector.ServerType import ServerType
 from pydantic_core._pydantic_core import ValidationError
 from queue_processor.QueueProcessor import QueueProcessor
 
@@ -37,12 +38,12 @@ def get_translation_from_task(translation_task: TranslationTask):
     if not translation_task.text.strip():
         return get_empty_translation(translation_task)
 
-    connector = MlCloudConnector("translation")
-    translation, finished, error = connector.execute(get_translation, service_logger, translation_task)
+    ml_connector = MlCloudConnector(ServerType.TRANSLATION, service_logger)
+    translation, finished, error = ml_connector.execute_on_cloud_server(get_translation, service_logger, translation_task)
     return translation if finished else get_error_translation(translation_task, error)
 
 
-def process(message):
+def process(message: dict[any, any]) -> dict[any, any] | None:
     try:
         task_message = TranslationTaskMessage(**message)
         service_logger.info(f"New task {task_message.model_dump()}")
diff --git a/src/translate.py b/src/translate.py
index 5a73089..b7b46cd 100644
--- a/src/translate.py
+++ b/src/translate.py
@@ -1,4 +1,5 @@
 from ml_cloud_connector.MlCloudConnector import MlCloudConnector
+from ml_cloud_connector.ServerType import ServerType
 from ollama import Client
 
 from data_model.Translation import Translation
@@ -26,7 +27,7 @@ def get_content(translation_task: TranslationTask):
 
 
 def get_translation(translation_task: TranslationTask) -> Translation:
-    ip_address = MlCloudConnector("translation").get_ip()
+    ip_address = MlCloudConnector(ServerType.TRANSLATION, service_logger).get_ip()
     client = Client(host=f"http://{ip_address}:{TRANSLATIONS_PORT}")
     service_logger.info(f"Using translation model {MODEL} on ip {ip_address}")
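
Reviewer note (not part of the patch): after this change, the run_it entry point drives
the queue pipeline synchronously by passing a dumped task message straight into process().
Below is a minimal sketch of that round trip, assuming the data_model classes and the
process() contract shown in the diff above; the key and sample text are illustrative:

    # Build a task message, run it through process(), and rebuild the response.
    # Field names (key, text, language_from, languages_to) come from the diff;
    # process() returns a plain dict, or None when the input message is invalid.
    from data_model.TranslationResponseMessage import TranslationResponseMessage
    from data_model.TranslationTaskMessage import TranslationTaskMessage
    from start_queue_processor import process

    task_message = TranslationTaskMessage(
        key="example-key", text="Hello world", language_from="English", languages_to=["French"]
    )
    results = process(task_message.model_dump())
    if results is not None:
        print(TranslationResponseMessage(**results).model_dump())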