Skip to content

Commit

Permalink
Add tests
Browse files Browse the repository at this point in the history
  • Loading branch information
hmstepanek committed Oct 20, 2023
1 parent 1123c05 commit 4619c90
Show file tree
Hide file tree
Showing 2 changed files with 160 additions and 0 deletions.
33 changes: 33 additions & 0 deletions tests/mlmodel_openai/_mock_external_openai_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,39 @@
"usage": {"completion_tokens": 11, "prompt_tokens": 53, "total_tokens": 64},
},
),
"You are a mathematician.": (
{
"Content-Type": "application/json",
"openai-model": "gpt-3.5-turbo-0613",
"openai-organization": "new-relic-nkmd8b",
"openai-processing-ms": "1469",
"openai-version": "2020-10-01",
"x-ratelimit-limit-requests": "200",
"x-ratelimit-limit-tokens": "40000",
"x-ratelimit-remaining-requests": "199",
"x-ratelimit-remaining-tokens": "39940",
"x-ratelimit-reset-requests": "7m12s",
"x-ratelimit-reset-tokens": "90ms",
"x-request-id": "49dbbffbd3c3f4612aa48def69059aad",
},
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "1 plus 2 is 3.",
"role": "assistant",
},
}
],
"created": 1696888865,
"id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat",
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion",
"usage": {"completion_tokens": 11, "prompt_tokens": 53, "total_tokens": 64},
},
),
}


Expand Down
127 changes: 127 additions & 0 deletions tests/mlmodel_openai/test_get_ai_message_ids.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import openai
from testing_support.fixtures import reset_core_stats_engine

from newrelic.api.background_task import background_task
from newrelic.api.ml_model import get_ai_message_ids
from newrelic.api.transaction import add_custom_attribute, current_transaction

_test_openai_chat_completion_messages_1 = (
{"role": "system", "content": "You are a scientist."},
{"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
)
_test_openai_chat_completion_messages_2 = (
{"role": "system", "content": "You are a mathematician."},
{"role": "user", "content": "What is 1 plus 2?"},
)
expected_message_ids_1 = [
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059ccd",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-0",
},
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059ccd",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-1",
},
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059ccd",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-2",
},
]
expected_message_ids_2 = [
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059aad",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat-0",
},
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059aad",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat-1",
},
{
"conversation_id": "my-awesome-id",
"request_id": "49dbbffbd3c3f4612aa48def69059aad",
"message_id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat-2",
},
]


@reset_core_stats_engine()
@background_task()
def test_get_ai_message_ids_when_nr_message_ids_not_set():
    """Inside a transaction that recorded no message ids, lookup returns []."""
    assert get_ai_message_ids("request-id-1") == []


@reset_core_stats_engine()
def test_get_ai_message_ids_outside_transaction():
    """Without an active transaction, lookup degrades gracefully to []."""
    assert get_ai_message_ids("request-id-1") == []


@reset_core_stats_engine()
@background_task()
def test_get_ai_message_ids_mulitple_async(loop, set_trace_info):  # TODO: rename "mulitple" -> "multiple"
    """Two async chat completions in one transaction each map their response id
    to the expected (conversation_id, request_id, message_id) records.
    """
    set_trace_info()
    add_custom_attribute("conversation_id", "my-awesome-id")

    async def _run():
        res1 = await openai.ChatCompletion.acreate(
            model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages_1, temperature=0.7, max_tokens=100
        )
        res2 = await openai.ChatCompletion.acreate(
            model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages_2, temperature=0.7, max_tokens=100
        )
        return [res1, res2]

    results = loop.run_until_complete(_run())

    # list() tolerates an iterable return without a manual element-copy loop.
    assert list(get_ai_message_ids(results[0].id)) == expected_message_ids_1
    assert list(get_ai_message_ids(results[1].id)) == expected_message_ids_2

    # Make sure we aren't causing a memory leak: after both lookups the
    # transaction's id map must be empty (presumably get_ai_message_ids pops
    # its entries — confirm against the API implementation).
    transaction = current_transaction()
    assert not transaction._nr_message_ids


@reset_core_stats_engine()
@background_task()
def test_get_ai_message_ids_mulitple_sync(set_trace_info):  # TODO: rename "mulitple" -> "multiple"
    """Two sequential sync chat completions in one transaction each map their
    response id to the expected (conversation_id, request_id, message_id) records.
    """
    set_trace_info()
    add_custom_attribute("conversation_id", "my-awesome-id")

    results = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages_1, temperature=0.7, max_tokens=100
    )
    # list() tolerates an iterable return without a manual element-copy loop.
    assert list(get_ai_message_ids(results.id)) == expected_message_ids_1

    results = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages_2, temperature=0.7, max_tokens=100
    )
    assert list(get_ai_message_ids(results.id)) == expected_message_ids_2

    # Make sure we aren't causing a memory leak: after both lookups the
    # transaction's id map must be empty (presumably get_ai_message_ids pops
    # its entries — confirm against the API implementation).
    transaction = current_transaction()
    assert not transaction._nr_message_ids

0 comments on commit 4619c90

Please sign in to comment.