promptflow-release-testing-matrix #266
7 fail, 5 skipped, 1 047 pass in 1h 42m 59s
17 files 17 suites 1h 42m 59s ⏱️
1 059 tests 1 047 ✅ 5 💤 7 ❌
9 196 runs 9 085 ✅ 74 💤 37 ❌
Results for commit f214705.
Annotations
Check warning on line 0 in tests.executor.unittests.executor._service.apis.test_tool.TestToolApis
github-actions / Test Results
1 out of 9 runs failed: test_gen_tool_meta_all_completed (tests.executor.unittests.executor._service.apis.test_tool.TestToolApis)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS macos-13)/test-results-executor-unit.xml [took 10s]
Raw output
assert 0 == 4
+ where 0 = len({})
self = <executor.unittests.executor._service.apis.test_tool.TestToolApis object at 0x10f085590>
executor_client = <starlette.testclient.TestClient object at 0x10f79aed0>
    def test_gen_tool_meta_all_completed(self, executor_client: TestClient):
        flow_folder = "web_classification"
        tools = [
            ("fetch_text_content_from_url.py", "python"),
            ("prepare_examples.py", "python"),
            ("classify_with_llm.jinja2", "llm"),
            ("convert_to_dict.py", "python"),
        ]
        request = construct_tool_meta_request_json(flow_folder, tools)
        response = executor_client.post(url="/tool/meta", json=request)
        # assert response
        assert response.status_code == 200
        tool_meta = response.json()
>       assert len(tool_meta["tools"]) == 4
E       assert 0 == 4
E        +  where 0 = len({})

/Users/runner/work/promptflow/promptflow/src/promptflow/tests/executor/unittests/executor/_service/apis/test_tool.py:44: AssertionError
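Note: the service answered 200 but returned an empty "tools" map instead of the four requested tool metas. A minimal triage sketch under that reading follows (meant to run inside the same test module; the "errors" fallback key is an assumed part of the response schema, not something shown in the failing output):

# Hypothetical triage helper: when the "tools" section of the tool-meta
# response comes back empty, print whatever else the service returned so the
# per-tool failure reasons show up in the test log.
def dump_tool_meta_response(executor_client, flow_folder="web_classification"):
    tools = [
        ("fetch_text_content_from_url.py", "python"),
        ("prepare_examples.py", "python"),
        ("classify_with_llm.jinja2", "llm"),
        ("convert_to_dict.py", "python"),
    ]
    request = construct_tool_meta_request_json(flow_folder, tools)  # helper already used by the test module
    response = executor_client.post(url="/tool/meta", json=request)
    body = response.json()
    if not body.get("tools"):
        # "errors" is an assumed key; adjust to the actual response schema.
        print(body.get("errors", body))
    return body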
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Test Results
All 8 runs failed: test_otel_trace_with_llm[openai_chat_api_flow-inputs0-False-3] (tests.executor.e2etests.test_traces.TestOTelTracer)
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS macos-13)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS macos-13)/test-results-executor-e2e.xml [took 4s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS windows-latest)/test-results-executor-e2e.xml [took 6s]
Raw output
Exception: An error occurred in the subprocess: AssertionError()
Stacktrace:
Traceback (most recent call last):
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
    func(*args, **kwargs)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
    self.validate_openai_tokens(span_list, is_stream)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
    assert token_name in span.attributes
AssertionError
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae405309d0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas...ai.azure.com/', 'api_key': 'f5e048efc74e45d19f596f36633be39c', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}
flow_file = 'openai_chat_api_flow'
inputs = {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': False}
is_stream = False, expected_span_length = 3
    @pytest.mark.parametrize(
        "flow_file, inputs, is_stream, expected_span_length",
        [
            ("openai_chat_api_flow", get_chat_input(False), False, 3),
            ("openai_chat_api_flow", get_chat_input(True), True, 3),
            ("openai_completion_api_flow", get_completion_input(False), False, 3),
            ("openai_completion_api_flow", get_completion_input(True), True, 3),
            ("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
            ("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
        ],
    )
    def test_otel_trace_with_llm(
        self,
        dev_connections,
        flow_file,
        inputs,
        is_stream,
        expected_span_length,
    ):
>       execute_function_in_subprocess(
            self.assert_otel_traces_with_llm,
            dev_connections,
            flow_file,
            inputs,
            is_stream,
            expected_span_length,
        )

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:482:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae405309d0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...', {'chat_history': [], 'question': 'What is the capital of the United States of America?', 'stream': False}, False, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7fae4c63eda0>
error_queue = <multiprocessing.queues.Queue object at 0x7fae3a807fd0>
process = <MockSpawnProcess name='MockSpawnProcess-35' pid=9593 parent=6184 stopped exitcode=0>
err = 'AssertionError()'
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...tests/test_traces.py", line 702, in validate_openai_tokens\n assert token_name in span.attributes\nAssertionError\n'
def execute_function_in_subprocess(func, *args, **kwargs):
    """
    Execute a function in a new process and return any exception that occurs.
    Replace pickle with dill for better serialization capabilities.
    """
    ctx = get_context("spawn")
    error_queue = ctx.Queue()
    process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
    process.start()
    process.join()  # Wait for the process to finish
    if not error_queue.empty():
        err, stacktrace_str = error_queue.get()
>       raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E       Exception: An error occurred in the subprocess: AssertionError()
E       Stacktrace:
E       Traceback (most recent call last):
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E           func(*args, **kwargs)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
E           self.validate_openai_tokens(span_list, is_stream)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
E           assert token_name in span.attributes
E       AssertionError

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
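All eight runs trip the same assertion in validate_openai_tokens, which walks the recorded OTel spans and expects each LLM span to carry OpenAI token-usage attributes. A rough sketch of that style of check follows; the attribute names and the span_type filter are assumptions for illustration, not the test's actual constants (those live in test_traces.py):

# Sketch of a token-usage check over collected OTel spans; attribute names
# below are assumed for illustration only.
EXPECTED_TOKEN_ATTRIBUTES = (
    "llm.usage.prompt_tokens",
    "llm.usage.completion_tokens",
    "llm.usage.total_tokens",
)

def check_token_attributes(span_list, expected_names=EXPECTED_TOKEN_ATTRIBUTES):
    for span in span_list:
        # Only spans produced by an LLM call are expected to carry usage counts.
        if span.attributes.get("span_type") != "LLM":
            continue
        for token_name in expected_names:
            assert token_name in span.attributes, (
                f"span {span.name!r} is missing token attribute {token_name!r}"
            )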
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Test Results
All 8 runs failed: test_otel_trace_with_llm[openai_completion_api_flow-inputs2-False-3] (tests.executor.e2etests.test_traces.TestOTelTracer)
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS macos-13)/test-results-executor-e2e.xml [took 7s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS macos-13)/test-results-executor-e2e.xml [took 4s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
Raw output
Exception: An error occurred in the subprocess: AssertionError()
Stacktrace:
Traceback (most recent call last):
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
    func(*args, **kwargs)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
    self.validate_openai_tokens(span_list, is_stream)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
    assert token_name in span.attributes
AssertionError
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae40530af0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas...ai.azure.com/', 'api_key': 'f5e048efc74e45d19f596f36633be39c', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}
flow_file = 'openai_completion_api_flow'
inputs = {'prompt': 'What is the capital of the United States of America?', 'stream': False}
is_stream = False, expected_span_length = 3
    @pytest.mark.parametrize(
        "flow_file, inputs, is_stream, expected_span_length",
        [
            ("openai_chat_api_flow", get_chat_input(False), False, 3),
            ("openai_chat_api_flow", get_chat_input(True), True, 3),
            ("openai_completion_api_flow", get_completion_input(False), False, 3),
            ("openai_completion_api_flow", get_completion_input(True), True, 3),
            ("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
            ("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
        ],
    )
    def test_otel_trace_with_llm(
        self,
        dev_connections,
        flow_file,
        inputs,
        is_stream,
        expected_span_length,
    ):
>       execute_function_in_subprocess(
            self.assert_otel_traces_with_llm,
            dev_connections,
            flow_file,
            inputs,
            is_stream,
            expected_span_length,
        )

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:482:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae40530af0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...ai_completion_api_flow', {'prompt': 'What is the capital of the United States of America?', 'stream': False}, False, 3)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7fae4c63eda0>
error_queue = <multiprocessing.queues.Queue object at 0x7fae40530fa0>
process = <MockSpawnProcess name='MockSpawnProcess-37' pid=9879 parent=6184 stopped exitcode=0>
err = 'AssertionError()'
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...tests/test_traces.py", line 702, in validate_openai_tokens\n assert token_name in span.attributes\nAssertionError\n'
def execute_function_in_subprocess(func, *args, **kwargs):
    """
    Execute a function in a new process and return any exception that occurs.
    Replace pickle with dill for better serialization capabilities.
    """
    ctx = get_context("spawn")
    error_queue = ctx.Queue()
    process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
    process.start()
    process.join()  # Wait for the process to finish
    if not error_queue.empty():
        err, stacktrace_str = error_queue.get()
>       raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E       Exception: An error occurred in the subprocess: AssertionError()
E       Stacktrace:
E       Traceback (most recent call last):
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E           func(*args, **kwargs)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
E           self.validate_openai_tokens(span_list, is_stream)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
E           assert token_name in span.attributes
E       AssertionError

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Test Results
All 8 runs failed: test_otel_trace_with_llm[llm_tool-inputs4-False-4] (tests.executor.e2etests.test_traces.TestOTelTracer)
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS macos-13)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS macos-13)/test-results-executor-e2e.xml [took 4s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
Raw output
Exception: An error occurred in the subprocess: AssertionError()
Stacktrace:
Traceback (most recent call last):
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
    func(*args, **kwargs)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
    self.validate_openai_tokens(span_list, is_stream)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
    assert token_name in span.attributes
AssertionError
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae405312a0>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas...ai.azure.com/', 'api_key': 'f5e048efc74e45d19f596f36633be39c', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}
flow_file = 'llm_tool', inputs = {'stream': False, 'topic': 'Hello'}
is_stream = False, expected_span_length = 4
    @pytest.mark.parametrize(
        "flow_file, inputs, is_stream, expected_span_length",
        [
            ("openai_chat_api_flow", get_chat_input(False), False, 3),
            ("openai_chat_api_flow", get_chat_input(True), True, 3),
            ("openai_completion_api_flow", get_completion_input(False), False, 3),
            ("openai_completion_api_flow", get_completion_input(True), True, 3),
            ("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
            ("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
        ],
    )
    def test_otel_trace_with_llm(
        self,
        dev_connections,
        flow_file,
        inputs,
        is_stream,
        expected_span_length,
    ):
>       execute_function_in_subprocess(
            self.assert_otel_traces_with_llm,
            dev_connections,
            flow_file,
            inputs,
            is_stream,
            expected_span_length,
        )

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:482:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae405312a0>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}, 'llm_tool', {'stream': False, 'topic': 'Hello'}, False, 4)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7fae4c63eda0>
error_queue = <multiprocessing.queues.Queue object at 0x7fae3b752ce0>
process = <MockSpawnProcess name='MockSpawnProcess-39' pid=10011 parent=6184 stopped exitcode=0>
err = 'AssertionError()'
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...tests/test_traces.py", line 702, in validate_openai_tokens\n assert token_name in span.attributes\nAssertionError\n'
def execute_function_in_subprocess(func, *args, **kwargs):
    """
    Execute a function in a new process and return any exception that occurs.
    Replace pickle with dill for better serialization capabilities.
    """
    ctx = get_context("spawn")
    error_queue = ctx.Queue()
    process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
    process.start()
    process.join()  # Wait for the process to finish
    if not error_queue.empty():
        err, stacktrace_str = error_queue.get()
>       raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E       Exception: An error occurred in the subprocess: AssertionError()
E       Stacktrace:
E       Traceback (most recent call last):
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E           func(*args, **kwargs)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
E           self.validate_openai_tokens(span_list, is_stream)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
E           assert token_name in span.attributes
E       AssertionError

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
Check warning on line 0 in tests.executor.e2etests.test_traces.TestOTelTracer
github-actions / Test Results
All 8 runs failed: test_otel_trace_with_llm[flow_with_async_llm_tasks-inputs5-False-6] (tests.executor.e2etests.test_traces.TestOTelTracer)
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS macos-13)/test-results-executor-e2e.xml [took 3s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.10) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 2s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS macos-13)/test-results-executor-e2e.xml [took 3s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 3s]
artifacts/promptflow_executor_tests Test Results (Python 3.9) (OS windows-latest)/test-results-executor-e2e.xml [took 5s]
Raw output
Exception: An error occurred in the subprocess: AssertionError()
Stacktrace:
Traceback (most recent call last):
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
    func(*args, **kwargs)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
    self.validate_openai_tokens(span_list, is_stream)
  File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
    assert token_name in span.attributes
AssertionError
self = <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae40532830>
dev_connections = {'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_bas...ai.azure.com/', 'api_key': 'f5e048efc74e45d19f596f36633be39c', 'api_type': 'azure', 'api_version': '2024-02-01'}}, ...}
flow_file = 'flow_with_async_llm_tasks'
inputs = {'chat_history': [], 'models': ['gpt-35-turbo', 'gpt-35-turbo'], 'question': 'What is the capital of the United States of America?'}
is_stream = False, expected_span_length = 6
    @pytest.mark.parametrize(
        "flow_file, inputs, is_stream, expected_span_length",
        [
            ("openai_chat_api_flow", get_chat_input(False), False, 3),
            ("openai_chat_api_flow", get_chat_input(True), True, 3),
            ("openai_completion_api_flow", get_completion_input(False), False, 3),
            ("openai_completion_api_flow", get_completion_input(True), True, 3),
            ("llm_tool", {"topic": "Hello", "stream": False}, False, 4),
            ("flow_with_async_llm_tasks", get_flow_sample_inputs("flow_with_async_llm_tasks"), False, 6),
        ],
    )
    def test_otel_trace_with_llm(
        self,
        dev_connections,
        flow_file,
        inputs,
        is_stream,
        expected_span_length,
    ):
>       execute_function_in_subprocess(
            self.assert_otel_traces_with_llm,
            dev_connections,
            flow_file,
            inputs,
            is_stream,
            expected_span_length,
        )

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py:482:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
func = <bound method TestOTelTracer.assert_otel_traces_with_llm of <executor.e2etests.test_traces.TestOTelTracer object at 0x7fae40532830>>
args = ({'aoai_assistant_connection': {'module': 'promptflow.connections', 'type': 'AzureOpenAIConnection', 'value': {'api_ba...dels': ['gpt-35-turbo', 'gpt-35-turbo'], 'question': 'What is the capital of the United States of America?'}, False, 6)
kwargs = {}
ctx = <multiprocessing.context.SpawnContext object at 0x7fae4c63eda0>
error_queue = <multiprocessing.queues.Queue object at 0x7fae3b04b520>
process = <MockSpawnProcess name='MockSpawnProcess-40' pid=10022 parent=6184 stopped exitcode=0>
err = 'AssertionError()'
stacktrace_str = 'Traceback (most recent call last):\n File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/pro...tests/test_traces.py", line 702, in validate_openai_tokens\n assert token_name in span.attributes\nAssertionError\n'
def execute_function_in_subprocess(func, *args, **kwargs):
    """
    Execute a function in a new process and return any exception that occurs.
    Replace pickle with dill for better serialization capabilities.
    """
    ctx = get_context("spawn")
    error_queue = ctx.Queue()
    process = ctx.Process(target=_run_in_subprocess_with_recording, args=(error_queue, func, args, kwargs))
    process.start()
    process.join()  # Wait for the process to finish
    if not error_queue.empty():
        err, stacktrace_str = error_queue.get()
>       raise Exception(f"An error occurred in the subprocess: {err}\nStacktrace:\n{stacktrace_str}")
E       Exception: An error occurred in the subprocess: AssertionError()
E       Stacktrace:
E       Traceback (most recent call last):
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py", line 15, in _run_in_subprocess
E           func(*args, **kwargs)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 503, in assert_otel_traces_with_llm
E           self.validate_openai_tokens(span_list, is_stream)
E         File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py", line 702, in validate_openai_tokens
E           assert token_name in span.attributes
E       AssertionError

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/process_utils.py:42: Exception
github-actions / Test Results
2 out of 8 runs failed: test_executor_exec_line_fail_with_exception[sync_tools_failures-sync_fail-In tool raise_an_exception: dummy_input] (tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 0s]
Raw output
assert 16 == 17
+ where 16 = len(['Traceback (most recent call last):', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
+ and 17 = len(['Traceback (most recent call last):', 'sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', 'sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
self = <executor.e2etests.test_executor_execution_failures.TestExecutorFailures object at 0x7f218431c450>
flow_folder = 'sync_tools_failures', failed_node_name = 'sync_fail'
message = 'In tool raise_an_exception: dummy_input'
    @pytest.mark.parametrize(
        "flow_folder, failed_node_name, message",
        [
            ("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
            ("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
        ],
    )
    def test_executor_exec_line_fail_with_exception(self, flow_folder, failed_node_name, message):
        yaml_file = get_yaml_file(flow_folder)
        # Here we set raise_ex to True to make sure the exception is raised and we can check the error detail.
        executor = FlowExecutor.create(yaml_file, {}, raise_ex=True)
        with pytest.raises(ToolExecutionError) as e:
            executor.exec_line({})
        ex = e.value
        assert ex.error_codes == ["UserError", "ToolExecutionError"]
        ex_str = str(ex)
        assert ex_str.startswith(f"Execution failure in '{failed_node_name}'")
        assert message in ex_str
        expected_stack_trace = expected_stack_traces[flow_folder]
        stacktrace = ex.tool_traceback.split("\n")
        # Remove "^^^^^^^^" lines as they are not part of actual stack trace
        stacktrace = [line for line in stacktrace if "^^^^^^^^" not in line]
>       assert len(stacktrace) == len(expected_stack_trace)
E       assert 16 == 17
E        +  where 16 = len(['Traceback (most recent call last):', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])
E        +  and 17 = len(['Traceback (most recent call last):', 'sync_fail.py", line 11, in raise_an_exception', ' raise_exception(s)', 'sync_fail.py", line 5, in raise_exception', ' raise Exception(msg)', 'Exception: In raise_exception: dummy_input', ...])

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_executor_execution_failures.py:153: AssertionError
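Both failing runs are Python 3.11, where tracebacks gain caret-marker lines (PEP 657); the test already strips lines containing "^^^^^^^^", yet the observed trace still ends up one line shorter than the expected fixture (16 vs 17). A version-tolerant comparison could normalize both sides before counting; the helper below is a sketch of that idea, not the test's actual logic:

import re

def normalize_traceback(text):
    # Drop caret-only marker lines (Python 3.11+) and trailing blank lines so
    # the line count does not depend on the interpreter that produced it.
    lines = [line for line in text.split("\n") if not re.fullmatch(r"\s*\^+\s*", line)]
    while lines and not lines[-1].strip():
        lines.pop()
    return lines

# Hypothetical usage inside the test:
#   stacktrace = normalize_traceback(ex.tool_traceback)
#   expected = normalize_traceback("\n".join(expected_stack_traces[flow_folder]))
#   assert len(stacktrace) == len(expected)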
github-actions / Test Results
2 out of 8 runs failed: test_executor_exec_line_fail_with_exception[async_tools_failures-async_fail-In tool raise_an_exception_async: dummy_input] (tests.executor.e2etests.test_executor_execution_failures.TestExecutorFailures)
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS ubuntu-latest)/test-results-executor-e2e.xml [took 0s]
artifacts/promptflow_executor_tests Test Results (Python 3.11) (OS windows-latest)/test-results-executor-e2e.xml [took 0s]
Raw output
assert 16 == 17
+ where 16 = len(['Traceback (most recent call last):', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
+ and 17 = len(['Traceback (most recent call last):', 'async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', 'async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
self = <executor.e2etests.test_executor_execution_failures.TestExecutorFailures object at 0x7f218431c310>
flow_folder = 'async_tools_failures', failed_node_name = 'async_fail'
message = 'In tool raise_an_exception_async: dummy_input'
    @pytest.mark.parametrize(
        "flow_folder, failed_node_name, message",
        [
            ("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
            ("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
        ],
    )
    def test_executor_exec_line_fail_with_exception(self, flow_folder, failed_node_name, message):
        yaml_file = get_yaml_file(flow_folder)
        # Here we set raise_ex to True to make sure the exception is raised and we can check the error detail.
        executor = FlowExecutor.create(yaml_file, {}, raise_ex=True)
        with pytest.raises(ToolExecutionError) as e:
            executor.exec_line({})
        ex = e.value
        assert ex.error_codes == ["UserError", "ToolExecutionError"]
        ex_str = str(ex)
        assert ex_str.startswith(f"Execution failure in '{failed_node_name}'")
        assert message in ex_str
        expected_stack_trace = expected_stack_traces[flow_folder]
        stacktrace = ex.tool_traceback.split("\n")
        # Remove "^^^^^^^^" lines as they are not part of actual stack trace
        stacktrace = [line for line in stacktrace if "^^^^^^^^" not in line]
>       assert len(stacktrace) == len(expected_stack_trace)
E       assert 16 == 17
E        +  where 16 = len(['Traceback (most recent call last):', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', ' File "/home/runner/work/promptflow/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])
E        +  and 17 = len(['Traceback (most recent call last):', 'async_fail.py", line 11, in raise_an_exception_async', ' await raise_exception_async(s)', 'async_fail.py", line 5, in raise_exception_async', ' raise Exception(msg)', 'Exception: In raise_exception_async: dummy_input', ...])

/home/runner/work/promptflow/promptflow/src/promptflow/tests/executor/e2etests/test_executor_execution_failures.py:153: AssertionError