Improve naming of models.py -> Feels like ML models, not datamodels.

Use __init__ level so it's imported as .datamodel instead.
scosman committed Sep 30, 2024
1 parent 8ee6268 commit b9a0fc9
Showing 8 changed files with 22 additions and 22 deletions.
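For readers skimming the diff, the practical effect is an import rename: call sites switch from kiln_ai.datamodel.models to the kiln_ai.datamodel package itself. Below is a minimal, hypothetical sketch of the idea only; the commit's actual __init__.py is not shown in this diff, and the class fields are placeholders.

# Hypothetical sketch -- not the real kiln_ai/datamodel/__init__.py.
# The point: classes formerly defined in kiln_ai/datamodel/models.py are
# exposed at the package level, so imports no longer mention "models".

# kiln_ai/datamodel/__init__.py (illustrative)
from pydantic import BaseModel


class Project(BaseModel):
    # Placeholder fields; the real datamodel has more.
    name: str
    path: str | None = None


class Task(BaseModel):
    name: str
    instruction: str | None = None


# Call sites then read as in the diffs below:
#   from kiln_ai.datamodel import Task          # new
#   from kiln_ai.datamodel.models import Task   # old, removed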
2 changes: 1 addition & 1 deletion libs/core/kiln_ai/adapters/base_adapter.py
@@ -2,8 +2,8 @@
 from abc import ABCMeta, abstractmethod
 from typing import Dict
 
+from kiln_ai.datamodel import Task
 from kiln_ai.datamodel.json_schema import validate_schema
-from kiln_ai.datamodel.models import Task
 
 
 class BaseAdapter(metaclass=ABCMeta):
4 changes: 2 additions & 2 deletions libs/core/kiln_ai/adapters/langchain_adapters.py
@@ -1,6 +1,6 @@
 from typing import Dict
 
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
 from kiln_ai.adapters.prompt_builders import SimplePromptBuilder
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.messages import HumanMessage, SystemMessage
@@ -13,7 +13,7 @@
 class LangChainPromptAdapter(BaseAdapter):
     def __init__(
         self,
-        kiln_task: models.Task,
+        kiln_task: datamodel.Task,
         custom_model: BaseChatModel | None = None,
         model_name: str | None = None,
         provider: str | None = None,
14 changes: 7 additions & 7 deletions libs/core/kiln_ai/adapters/test_prompt_adaptors.py
@@ -1,7 +1,7 @@
 import os
 from pathlib import Path
 
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
 import pytest
 from kiln_ai.adapters.langchain_adapters import LangChainPromptAdapter
 from kiln_ai.adapters.ml_model_list import built_in_models, ollama_online
@@ -95,30 +95,30 @@ async def test_all_built_in_models(tmp_path):
 
 
 def build_test_task(tmp_path: Path):
-    project = models.Project(name="test", path=tmp_path / "test.kiln")
+    project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
     project.save_to_file()
     assert project.name == "test"
 
-    task = models.Task(parent=project, name="test task")
+    task = datamodel.Task(parent=project, name="test task")
     task.save_to_file()
     assert task.name == "test task"
     task.instruction = (
         "You are an assistant which performs math tasks provided in plain text."
     )
 
-    r1 = models.TaskRequirement(
+    r1 = datamodel.TaskRequirement(
         parent=task,
         name="BEDMAS",
         instruction="You follow order of mathematical operation (BEDMAS)",
     )
     r1.save_to_file()
-    r2 = models.TaskRequirement(
+    r2 = datamodel.TaskRequirement(
         parent=task,
         name="only basic math",
         instruction="If the problem has anything other than addition, subtraction, multiplication, division, and brackets, you will not answer it. Reply instead with 'I'm just a basic calculator, I don't know how to do that'.",
     )
     r2.save_to_file()
-    r3 = models.TaskRequirement(
+    r3 = datamodel.TaskRequirement(
         parent=task,
         name="Answer format",
         instruction="The answer can contain any content about your reasoning, but at the end it should include the final answer in numerals in square brackets. For example if the answer is one hundred, the end of your response should be [100].",
@@ -133,7 +133,7 @@ async def run_simple_test(tmp_path: Path, model_name: str, provider: str | None
     return await run_simple_task(task, model_name, provider)
 
 
-async def run_simple_task(task: models.Task, model_name: str, provider: str):
+async def run_simple_task(task: datamodel.Task, model_name: str, provider: str):
     adapter = LangChainPromptAdapter(task, model_name=model_name, provider=provider)
 
     answer = await adapter.invoke(
2 changes: 1 addition & 1 deletion libs/core/kiln_ai/adapters/test_prompt_builders.py
@@ -4,7 +4,7 @@
 from kiln_ai.adapters.prompt_builders import MultiShotPromptBuilder, SimplePromptBuilder
 from kiln_ai.adapters.test_prompt_adaptors import build_test_task
 from kiln_ai.adapters.test_structured_output import build_structured_output_test_task
-from kiln_ai.datamodel.models import (
+from kiln_ai.datamodel import (
     Example,
     ExampleOutput,
     ExampleSource,
16 changes: 8 additions & 8 deletions libs/core/kiln_ai/adapters/test_structured_output.py
@@ -3,7 +3,7 @@
 
 import jsonschema
 import jsonschema.exceptions
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
 import pytest
 from kiln_ai.adapters.base_adapter import BaseAdapter
 from kiln_ai.adapters.langchain_adapters import LangChainPromptAdapter
@@ -51,7 +51,7 @@ async def test_structured_output_ollama_llama(tmp_path):
 
 
 class MockAdapter(BaseAdapter):
-    def __init__(self, kiln_task: models.Task, response: Dict | str | None):
+    def __init__(self, kiln_task: datamodel.Task, response: Dict | str | None):
         super().__init__(kiln_task)
         self.response = response
 
@@ -79,8 +79,8 @@ async def test_mock_unstructred_response(tmp_path):
     answer = await adapter.invoke("You are a mock, send me the response!")
 
     # Should error, expecting a string, not a dict
-    project = models.Project(name="test", path=tmp_path / "test.kiln")
-    task = models.Task(parent=project, name="test task")
+    project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
+    task = datamodel.Task(parent=project, name="test task")
     task.instruction = (
         "You are an assistant which performs math tasks provided in plain text."
     )
@@ -112,9 +112,9 @@ async def test_all_built_in_models_structured_output(tmp_path):
 
 
 def build_structured_output_test_task(tmp_path: Path):
-    project = models.Project(name="test", path=tmp_path / "test.kiln")
+    project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
     project.save_to_file()
-    task = models.Task(
+    task = datamodel.Task(
         parent=project,
         name="test task",
         instruction="You are an assistant which tells a joke, given a subject.",
@@ -148,9 +148,9 @@ async def run_structured_output_test(tmp_path: Path, model_name: str, provider:
 
 
 def build_structured_input_test_task(tmp_path: Path):
-    project = models.Project(name="test", path=tmp_path / "test.kiln")
+    project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
     project.save_to_file()
-    task = models.Task(
+    task = datamodel.Task(
         parent=project,
         name="test task",
         instruction="You are an assistant which classifies a triangle given the lengths of its sides. If all sides are of equal length, the triangle is equilateral. If two sides are equal, the triangle is isosceles. Otherwise, it is scalene.\n\nAt the end of your response return the result in double square brackets. It should be plain text. It should be exactly one of the three following strings: '[[equilateral]]', or '[[isosceles]]', or '[[scalene]]'.",
2 changes: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 from .json_schema import validate_schema
 
 if TYPE_CHECKING:
-    from .models import Task
+    from . import Task
 
 # Conventions:
 # 1) Names are filename safe as they may be used as file names. They are informational and not to be used in prompts/training/validation.
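The hunk above also shows the TYPE_CHECKING guard picking up the new import path. As a side note, here is an illustrative sketch of that pattern (not the repository's exact file; the describe function is a placeholder): the Task import runs only for type checkers, which avoids a circular import at runtime between the package __init__ and the modules it imports, while string annotations keep the hints valid.

# Illustrative sketch of the TYPE_CHECKING pattern, placeholder names.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Resolved by type checkers (mypy/pyright); skipped at runtime, so the
    # package __init__ can import this module without creating a cycle.
    from . import Task


def describe(task: "Task") -> str:
    # String annotation stays valid at runtime even though Task was never
    # imported outside of type checking.
    return f"task named {task.name}"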
2 changes: 1 addition & 1 deletion libs/core/kiln_ai/datamodel/test_example_models.py
@@ -1,7 +1,7 @@
 import json
 
 import pytest
-from kiln_ai.datamodel.models import (
+from kiln_ai.datamodel import (
     Example,
     ExampleOutput,
     ExampleOutputSource,
2 changes: 1 addition & 1 deletion libs/core/kiln_ai/datamodel/test_models.py
@@ -1,7 +1,7 @@
 import json
 
 import pytest
-from kiln_ai.datamodel.models import Priority, Project, Task, TaskDeterminism
+from kiln_ai.datamodel import Priority, Project, Task, TaskDeterminism
 from kiln_ai.datamodel.test_json_schema import json_joke_schema
 from pydantic import ValidationError
 
