From 55a2a1e15e3d2a4acf32e5aa8c1e86121f9aab03 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Fri, 10 Jan 2025 09:51:10 -0500
Subject: [PATCH] use llama 3.3 for tool tests to address accuracy failures
 from 3.1

---
 libs/ai-endpoints/tests/integration_tests/conftest.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py
index 47da5bf5..c6ef1fac 100644
--- a/libs/ai-endpoints/tests/integration_tests/conftest.py
+++ b/libs/ai-endpoints/tests/integration_tests/conftest.py
@@ -111,7 +111,7 @@ def get_all_known_models() -> List[Model]:
         metafunc.parametrize("chat_model", models, ids=models)
 
     if "tool_model" in metafunc.fixturenames:
-        models = ["meta/llama-3.1-8b-instruct"]
+        models = ["meta/llama-3.3-70b-instruct"]
         if model_list := metafunc.config.getoption("tool_model_id"):
             models = model_list
         if metafunc.config.getoption("all_models"):
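
Note: the changed lines above sit inside the pytest_generate_tests hook in the
integration-test conftest.py, which parametrizes every test requesting the
tool_model fixture over a list of model ids. The sketch below is a minimal,
standalone illustration of that pattern, assuming hypothetical option flags
(--tool-model-id, --all-models) and a hypothetical test module; only the
option keys "tool_model_id"/"all_models" and the default model id come from
the diff itself.

    # conftest.py -- sketch of the tool_model parametrization pattern (not the real file)
    def pytest_addoption(parser):
        # hypothetical registration of the options read in pytest_generate_tests
        parser.addoption("--tool-model-id", dest="tool_model_id", action="append", default=None)
        parser.addoption("--all-models", dest="all_models", action="store_true")

    def pytest_generate_tests(metafunc):
        if "tool_model" in metafunc.fixturenames:
            # default tool-calling model; the patch bumps this from llama 3.1 8B to 3.3 70B
            models = ["meta/llama-3.3-70b-instruct"]
            # allow overriding the model list from the command line
            if model_list := metafunc.config.getoption("tool_model_id"):
                models = model_list
            # each test that requests tool_model runs once per model id
            metafunc.parametrize("tool_model", models, ids=models)

    # test_tools.py -- example consumer of the parametrized fixture
    def test_tool_calling(tool_model):
        assert tool_model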