From 0c7d1db5cd55fa68f7dff435c933b4ca5ae6e090 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Fri, 27 Oct 2023 15:29:19 -0700 Subject: [PATCH] Mock openai error responses (#950) * Add example tests and mock error responses * Set invalid api key in auth error test Co-authored-by: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> --- .../_mock_external_openai_server.py | 33 ++++++++++- tests/mlmodel_openai/conftest.py | 26 ++++++++- tests/mlmodel_openai/test_error.py | 58 +++++++++++++++++++ 3 files changed, 114 insertions(+), 3 deletions(-) create mode 100644 tests/mlmodel_openai/test_error.py diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 438e4072d8..6bbf5d5003 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -28,6 +28,33 @@ # 3) This app runs on a separate thread meaning it won't block the test app. RESPONSES = { + "Invalid API key.": ( + {"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": "null", + "code": "invalid_api_key", + } + }, + ), + "Model does not exist.": ( + { + "Content-Type": "application/json", + "x-request-id": "cfdf51fb795362ae578c12a21796262c", + }, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": "null", + "code": "model_not_found", + } + }, + ), "This is an embedding test.": ( { "Content-Type": "application/json", @@ -42,6 +69,7 @@ "x-ratelimit-reset-tokens": "2ms", "x-request-id": "c70828b2293314366a76a2b1dcb20688", }, + 200, { "data": [ { @@ -70,6 +98,7 @@ "x-ratelimit-reset-tokens": "90ms", "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", }, + 200, { "choices": [ { @@ -105,7 +134,7 @@ def simple_get(self): headers, response = ({}, "") for k, v in RESPONSES.items(): if prompt.startswith(k): - headers, response = v + headers, status_code, response = v break else: # If no matches found self.send_response(500) @@ -114,7 +143,7 @@ def simple_get(self): return # Send response code - self.send_response(200) + self.send_response(status_code) # Send headers for k, v in headers.items(): diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 0e59b3e970..4513be742d 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -90,6 +90,9 @@ def openai_server(): # Apply function wrappers to record data wrap_function_wrapper("openai.api_requestor", "APIRequestor.request", wrap_openai_api_requestor_request) + wrap_function_wrapper( + "openai.api_requestor", "APIRequestor._interpret_response", wrap_openai_api_requestor_interpret_response + ) yield # Run tests # Write responses to audit log @@ -101,6 +104,23 @@ def openai_server(): RECORDED_HEADERS = set(["x-request-id", "content-type"]) +def wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwargs): + rbody, rcode, rheaders = bind_request_interpret_response_params(*args, **kwargs) + headers = dict( + filter( + lambda k: k[0].lower() in RECORDED_HEADERS + or k[0].lower().startswith("openai") 
+            or k[0].lower().startswith("x-ratelimit"),
+            rheaders.items(),
+        )
+    )
+
+    if rcode >= 400 or rcode < 200:
+        rbody = json.loads(rbody)
+    OPENAI_AUDIT_LOG_CONTENTS["error"] = headers, rcode, rbody  # Append response data to audit log
+    return wrapped(*args, **kwargs)
+
+
 def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs):
     params = bind_request_params(*args, **kwargs)
     if not params:
@@ -124,9 +144,13 @@ def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs):
     )
 
     # Log response
-    OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, data  # Append response data to audit log
+    OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, result.http_status, data  # Append response data to audit log
     return result
 
 
 def bind_request_params(method, url, params=None, *args, **kwargs):
     return params
+
+
+def bind_request_interpret_response_params(result, stream):
+    return result.content.decode("utf-8"), result.status_code, result.headers
diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py
new file mode 100644
index 0000000000..e8f5c31518
--- /dev/null
+++ b/tests/mlmodel_openai/test_error.py
@@ -0,0 +1,58 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import openai
+import pytest
+
+from newrelic.api.background_task import background_task
+
+enabled_ml_settings = {
+    "machine_learning.enabled": True,
+    "machine_learning.inference_events_value.enabled": True,
+    "ml_insights_events.enabled": True,
+}
+
+_test_openai_chat_completion_messages = (
+    {"role": "system", "content": "You are a scientist."},
+    {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
+)
+
+
+@background_task()
+def test_invalid_request_error_model_does_not_exist():
+    with pytest.raises(openai.InvalidRequestError):
+        openai.ChatCompletion.create(
+            model="does-not-exist",
+            messages=(
+                {"role": "system", "content": "Model does not exist."},
+                {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
+            ),
+            temperature=0.7,
+            max_tokens=100,
+        )
+
+
+@background_task()
+def test_authentication_error_invalid_api_key(monkeypatch):
+    monkeypatch.setattr(openai, "api_key", "InvalidKey")
+    with pytest.raises(openai.error.AuthenticationError):
+        openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=(
+                {"role": "system", "content": "Invalid API key."},
+                {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
+            ),
+            temperature=0.7,
+            max_tokens=100,
+        )
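
How the mock replies, at a glance: every RESPONSES value is now a three-tuple
of (headers, status code, body), and simple_get unpacks all three before
replying. A minimal, runnable sketch of that dispatch -- the entries are
abbreviated and the fallback body is illustrative, not copied from the real
handler in _mock_external_openai_server.py:

    # Canned responses keyed by prompt prefix: (headers, status_code, body).
    RESPONSES = {
        "Invalid API key.": (
            {"Content-Type": "application/json; charset=utf-8"},
            401,
            {"error": {"type": "invalid_request_error", "code": "invalid_api_key"}},
        ),
        "Model does not exist.": (
            {"Content-Type": "application/json"},
            404,
            {"error": {"type": "invalid_request_error", "code": "model_not_found"}},
        ),
    }

    def lookup(prompt):
        # First key the prompt starts with wins, as in simple_get.
        for key, value in RESPONSES.items():
            if prompt.startswith(key):
                headers, status_code, body = value  # three-tuple unpack
                return headers, status_code, body
        # Unmatched prompts get a 500, mirroring the handler's else branch.
        return {}, 500, {"error": {"message": "no canned response matched"}}

    assert lookup("Invalid API key.")[1] == 401
    assert lookup("some other prompt")[1] == 500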
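
The new _interpret_response wrapper keeps only headers worth asserting on and
JSON-decodes error bodies before logging them. The predicate below is lifted
from the patch; the sample header values are made up:

    import json

    RECORDED_HEADERS = set(["x-request-id", "content-type"])

    def filter_recorded_headers(rheaders):
        # Keep a header only if it is explicitly recorded or carries an
        # openai/x-ratelimit prefix -- same predicate as the wrapper's lambda.
        return dict(
            filter(
                lambda kv: kv[0].lower() in RECORDED_HEADERS
                or kv[0].lower().startswith("openai")
                or kv[0].lower().startswith("x-ratelimit"),
                rheaders.items(),
            )
        )

    sample = {
        "Content-Type": "application/json",
        "x-request-id": "abc123",
        "x-ratelimit-remaining-tokens": "39940",
        "Date": "Fri, 27 Oct 2023 22:29:19 GMT",  # dropped by the filter
    }
    assert set(filter_recorded_headers(sample)) == {
        "Content-Type",
        "x-request-id",
        "x-ratelimit-remaining-tokens",
    }

    # Bodies of responses outside the 200-399 range are decoded from JSON
    # before being stored under the audit log's "error" key, as in the wrapper.
    rcode, rbody = 401, json.dumps({"error": {"code": "invalid_api_key"}})
    if rcode >= 400 or rcode < 200:
        rbody = json.loads(rbody)
    assert rbody["error"]["code"] == "invalid_api_key"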
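
The patch itself never shows how the tests reach the mock; presumably the
existing conftest fixtures redirect the openai client to the local server
before the tests run. A hypothetical sketch of that wiring and of how the
canned 404 surfaces as the exception the new test asserts on (the port is an
assumption; the real fixture picks its own):

    import openai

    openai.api_base = "http://localhost:8000"  # assumed mock address
    openai.api_key = "NOT-A-REAL-KEY"  # any value; the mock matches on prompt, not key

    try:
        openai.ChatCompletion.create(
            model="does-not-exist",
            messages=({"role": "system", "content": "Model does not exist."},),
            temperature=0.7,
            max_tokens=100,
        )
    except openai.InvalidRequestError as exc:
        # The mocked 404 body is parsed by the openai client and re-raised
        # client-side, which is what pytest.raises captures in the test.
        print("mock error surfaced:", exc)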