
Commit

Merge pull request #189 from dbpunk-labs/feat/llama_new_action
feat: move the prompt to role module
imotai authored Dec 13, 2023
2 parents 1792afe + 46f957a commit e931df1
Showing 13 changed files with 205 additions and 106 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -159,3 +159,5 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.cosine
5 changes: 4 additions & 1 deletion agent/src/og_agent/agent_api_server.py
@@ -178,10 +178,13 @@ class TaskRequest(BaseModel):


async def run_task(task: TaskRequest, key):
async for respond in agent_sdk.prompt(task.prompt, key, files=task.input_files, context_id=task.context_id):
async for respond in agent_sdk.prompt(
task.prompt, key, files=task.input_files, context_id=task.context_id
):
response = StepResponse.new_from(respond).model_dump(exclude_none=True)
yield "data: %s\n" % json.dumps(response)


@app.post("/process")
async def process_task(
task: TaskRequest,
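For reference, a minimal client sketch for the /process endpoint above, consuming the SSE stream that run_task yields. The endpoint path, the TaskRequest fields, and the "data:" framing come from the diff; the host, port, and "api-key" header name are assumptions.

import json
import httpx

def stream_task(prompt: str, key: str, base_url: str = "http://127.0.0.1:9528"):
    # POST a TaskRequest and read the "data: <json>" lines yielded by run_task
    payload = {"prompt": prompt, "input_files": [], "context_id": None}
    with httpx.stream(
        "POST",
        f"{base_url}/process",
        json=payload,
        headers={"api-key": key},  # header name is an assumption
        timeout=None,
    ) as resp:
        for line in resp.iter_lines():
            if line.startswith("data: "):
                yield json.loads(line[len("data: "):])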
6 changes: 4 additions & 2 deletions agent/src/og_agent/base_agent.py
@@ -63,14 +63,17 @@ class TypingState:
MESSAGE = 4
OTHER = 5


class BaseAgent:

def __init__(self, sdk):
self.kernel_sdk = sdk
self.model_name = ""
self.agent_memories = {}

def create_new_memory_with_default_prompt(self, user_name, user_id, actions = ACTIONS):
def create_new_memory_with_default_prompt(
self, user_name, user_id, actions=ACTIONS
):
"""
create a new memory for the user
"""
@@ -386,7 +389,6 @@ async def extract_message(
response_token_count + context_output_token_count
)
if is_json_format:

(
new_text_content,
new_code_content,
87 changes: 28 additions & 59 deletions agent/src/og_agent/llama_agent.py
@@ -36,51 +36,6 @@ def _output_exception(self):
"Sorry, the LLM did return nothing, You can use a better performance model"
)


def _format_output(self, json_response):
"""
format the response and send it to the user
"""
answer = json_response["explanation"]
if json_response["action"] == "no_action":
return answer
elif json_response["action"] == "show_sample_code":
return ""
else:
code = json_response.get("code", None)
answer_code = """%s
```%s
%s
```
""" % (
answer,
json_response.get("language", "python"),
code if code else "",
)
return answer_code

async def handle_show_sample_code(
self, json_response, queue, context, task_context
):
code = json_response["code"]
explanation = json_response["explanation"]
saved_filenames = json_response.get("saved_filenames", [])
tool_input = json.dumps({
"code": code,
"explanation": explanation,
"saved_filenames": saved_filenames,
"language": json_response.get("language", "text"),
})
await queue.put(
TaskResponse(
state=task_context.to_context_state_proto(),
response_type=TaskResponse.OnStepActionStart,
on_step_action_start=OnStepActionStart(
input=tool_input, tool="show_sample_code"
),
)
)

async def handle_bash_code(
self, json_response, queue, context, task_context, task_opt
):
@@ -130,7 +85,7 @@ async def handle_python_function(
state=task_context.to_context_state_proto(),
response_type=TaskResponse.OnStepActionStart,
on_step_action_start=OnStepActionStart(
input=tool_input, tool='execute'
input=tool_input, tool="execute"
),
)
)
@@ -176,10 +131,10 @@ async def arun(self, request, queue, context, task_opt):
context_id = (
request.context_id
if request.context_id
else self.create_new_memory_with_default_prompt("", "", actions=[FUNCTION_EXECUTE,
FUNCTION_DIRECT_MESSAGE])
else self.create_new_memory_with_default_prompt(
"", "", actions=[FUNCTION_EXECUTE, FUNCTION_DIRECT_MESSAGE]
)
)

if context_id not in self.agent_memories:
await queue.put(
TaskResponse(
@@ -190,7 +145,6 @@ async def arun(self, request, queue, context, task_opt):
)
)
return

agent_memory = self.agent_memories[context_id]
agent_memory.update_options(self.memory_option)
agent_memory.append_chat_message(
@@ -253,16 +207,23 @@ async def arun(self, request, queue, context, task_opt):
break
logger.debug(f" llama response {json_response}")
if (
'function_call'in json_response and json_response["function_call"] == "execute"
"function_call" in json_response
and json_response["function_call"] == "execute"
):
agent_memory.append_chat_message(message)
tools_mapping = {
"python": self.handle_python_function,
"bash": self.handle_bash_code,
}

function_result = await tools_mapping[json_response["arguments"]['language']](
json_response['arguments'], queue, context, task_context, task_opt
function_result = await tools_mapping[
json_response["arguments"]["language"]
](
json_response["arguments"],
queue,
context,
task_context,
task_opt,
)

logger.debug(f"the function result {function_result}")
@@ -287,23 +248,31 @@ async def arun(self, request, queue, context, task_opt):
"role": "user",
"content": f"{action_output} \n {function_result.console_stdout}",
})
agent_memory.append_chat_message({"role": "user", "content": current_question})
agent_memory.append_chat_message(
{"role": "user", "content": current_question}
)
elif function_result.has_error:
agent_memory.append_chat_message({
"role": "user",
"content": f"{action_output} \n {function_result.console_stderr}",
})
current_question = f"Generate a new step to fix the above error"
agent_memory.append_chat_message({"role": "user", "content": current_question})
agent_memory.append_chat_message(
{"role": "user", "content": current_question}
)
else:
agent_memory.append_chat_message({
"role": "user",
"content": f"{action_output} \n {function_result.console_stdout}",
})
agent_memory.append_chat_message({
"role": "user", "content": current_question})
elif 'function_call' in json_response and json_response["function_call"] == "direct_message":
message = json_response['arguments']['message']
agent_memory.append_chat_message(
{"role": "user", "content": current_question}
)
elif (
"function_call" in json_response
and json_response["function_call"] == "direct_message"
):
message = json_response["arguments"]["message"]
await queue.put(
TaskResponse(
state=task_context.to_context_state_proto(),
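A condensed sketch of the dispatch flow in arun above, with stub coroutines standing in for handle_python_function and handle_bash_code (the stub names here are hypothetical). The JSON shapes mirror the FUNCTION_EXECUTE and FUNCTION_DIRECT_MESSAGE schemas defined in prompt.py.

import asyncio

async def handle_python(arguments):
    # stub: the real handler executes the code in the kernel and streams output
    print("execute python:", arguments["code"])

async def handle_bash(arguments):
    # stub: the real handler wraps and executes the bash code
    print("execute bash:", arguments["code"])

async def dispatch(json_response):
    tools_mapping = {"python": handle_python, "bash": handle_bash}
    if json_response.get("function_call") == "execute":
        await tools_mapping[json_response["arguments"]["language"]](
            json_response["arguments"]
        )
    elif json_response.get("function_call") == "direct_message":
        print("message to user:", json_response["arguments"]["message"])

asyncio.run(dispatch({
    "function_call": "execute",
    "arguments": {
        "explanation": "print a greeting",
        "code": "print('hello world')",
        "language": "python",
    },
}))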
2 changes: 1 addition & 1 deletion agent/src/og_agent/llama_client.py
@@ -20,7 +20,7 @@ def __init__(self, endpoint, key, grammar):
super().__init__(endpoint + "/v1/chat/completions", key)
self.grammar = grammar

async def chat(self, messages, model, temperature=0, max_tokens=1024, stop=[]):
async def chat(self, messages, model, temperature=0, max_tokens=1024, stop=["\n"]):
data = {
"messages": messages,
"temperature": temperature,
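Changing the stop default from [] to ["\n"] halts generation at the first newline, which fits the single-line, grammar-constrained JSON replies the llama agent parses. A sketch of the request body that chat assembles: the messages and temperature fields are visible in the diff; the rest is inferred from the chat signature and is an assumption.

data = {
    "messages": [{"role": "user", "content": "hello"}],
    "temperature": 0,
    "max_tokens": 1024,
    "stop": ["\n"],  # new default: stop after one JSON line
    "model": "llama",  # assumed: chat() takes a model argument, so it likely lands here
}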
80 changes: 39 additions & 41 deletions agent/src/og_agent/prompt.py
@@ -17,52 +17,50 @@
"Use `execute` action to execute any code and `direct_message` action to send message to user",
]

FUNCTION_EXECUTE= ActionDesc(
name="execute",
desc="This action executes code in your programming environment and returns the output",
parameters=json.dumps({
"type": "object",
"properties": {
"explanation": {
"type": "string",
"description": "the explanation about the code parameters",
},
"code": {
"type": "string",
"description": "the bash code to be executed",
},
"language": {
"type": "string",
"description": "the language of the code, only python and bash are supported",
},
"saved_filenames": {
"type": "array",
"items": {"type": "string"},
"description": "A list of filenames that were created by the code",
},
FUNCTION_EXECUTE = ActionDesc(
name="execute",
desc="This action executes code in your programming environment and returns the output",
parameters=json.dumps({
"type": "object",
"properties": {
"explanation": {
"type": "string",
"description": "the explanation about the code parameters",
},
"required": ["explanation", "code", "language"],
}),
)
"code": {
"type": "string",
"description": "the bash code to be executed",
},
"language": {
"type": "string",
"description": "the language of the code, only python and bash are supported",
},
"saved_filenames": {
"type": "array",
"items": {"type": "string"},
"description": "A list of filenames that were created by the code",
},
},
"required": ["explanation", "code", "language"],
}),
)

FUNCTION_DIRECT_MESSAGE= ActionDesc(
name="direct_message",
desc="This action sends a direct message to user.",
parameters=json.dumps({
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "the message will be sent to user",
},
FUNCTION_DIRECT_MESSAGE = ActionDesc(
name="direct_message",
desc="This action sends a direct message to user.",
parameters=json.dumps({
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "the message will be sent to user",
},
"required": ["message"],
}),
},
"required": ["message"],
}),
)

ACTIONS = [
FUNCTION_EXECUTE
]
ACTIONS = [FUNCTION_EXECUTE]

OUTPUT_FORMAT = """The output format must be a JSON format with the following fields:
* function_call: The name of the action
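A sketch of arguments that satisfy the FUNCTION_EXECUTE parameter schema above, checked with the third-party jsonschema package. This is an illustration only: nothing in the diff shows the repo using jsonschema, and it assumes ActionDesc exposes parameters as an attribute.

import json
from jsonschema import validate
from og_agent.prompt import FUNCTION_EXECUTE

arguments = {
    "explanation": "list files in the workspace",
    "code": "ls",
    "language": "bash",
}
# raises jsonschema.exceptions.ValidationError if a required field is missing
validate(instance=arguments, schema=json.loads(FUNCTION_EXECUTE.parameters))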
1 change: 0 additions & 1 deletion agent/tests/openai_agent_tests.py
@@ -158,7 +158,6 @@ async def test_openai_agent_call_execute_bash_code(mocker, kernel_sdk):
assert console_output[0].console_stdout == "hello world\n", "bad console output"



@pytest.mark.asyncio
async def test_openai_agent_call_execute_python_code(mocker, kernel_sdk):
kernel_sdk.connect()
20 changes: 20 additions & 0 deletions agent/tests/tokenizer_test.py
@@ -0,0 +1,20 @@
# vim:fenc=utf-8

# SPDX-FileCopyrightText: 2023 imotai <[email protected]>
# SPDX-FileContributor: imotai
#
# SPDX-License-Identifier: Elastic-2.0

""" """

import logging
import io
from og_agent.tokenizer import tokenize

logger = logging.getLogger(__name__)


def test_parse_explanation():
arguments = """{"function_call":"execute", "arguments": {"explanation":"h"""
for token_state, token in tokenize(io.StringIO(arguments)):
logger.info(f"token_state: {token_state}, token: {token}")
1 change: 1 addition & 0 deletions roles/README.md
@@ -0,0 +1 @@
# the role module
26 changes: 26 additions & 0 deletions roles/setup.py
@@ -0,0 +1,26 @@
# Copyright (C) 2023 dbpunk.com Author imotai <[email protected]>
# SPDX-FileCopyrightText: 2023 imotai <[email protected]>
# SPDX-FileContributor: imotai
#
# SPDX-License-Identifier: Elastic-2.0

""" """
from setuptools import setup, find_packages

setup(
name="og_roles",
version="0.3.6",
description="Open source llm agent service",
author="imotai",
author_email="[email protected]",
url="https://github.com/dbpunk-labs/octogen",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
packages=[
"og_roles",
],
package_dir={
"og_roles": "src/og_roles",
},
package_data={},
)
Empty file added roles/src/og_roles/__init__.py
