diff --git a/app/web_ui/src/routes/(app)/+page.svelte b/app/web_ui/src/routes/(app)/+page.svelte
index f265b6f..b1c3424 100644
--- a/app/web_ui/src/routes/(app)/+page.svelte
+++ b/app/web_ui/src/routes/(app)/+page.svelte
@@ -1,53 +1,9 @@
- {WebsiteName}
-
-
- {@html jsonldScript}
+ Kiln Studio
+
-
-
-
-
- Kiln Studio
-
-
-
- The
- open source
- ML product platform
-
-
-
-
-
+
Home
diff --git a/app/web_ui/src/routes/(app)/+page.ts b/app/web_ui/src/routes/+layout.ts
similarity index 100%
rename from app/web_ui/src/routes/(app)/+page.ts
rename to app/web_ui/src/routes/+layout.ts
diff --git a/app/web_ui/static/logo.svg b/app/web_ui/static/logo.svg
new file mode 100644
index 0000000..9b78b53
--- /dev/null
+++ b/app/web_ui/static/logo.svg
@@ -0,0 +1,5 @@
+
diff --git a/app/web_ui/tailwind.config.js b/app/web_ui/tailwind.config.js
index 01c6a74..2a687c6 100644
--- a/app/web_ui/tailwind.config.js
+++ b/app/web_ui/tailwind.config.js
@@ -8,19 +8,27 @@ export default {
daisyui: {
themes: [
{
- saasstartertheme: {
- primary: "#180042",
- "primary-content": "#fefbf6",
- secondary: "#c7b9f8",
- neutral: "#180042",
- "neutral-content": "#fefbf6",
- accent: "#db2777",
- "accent-content": "#180042",
- "base-content": "#180042",
- "base-100": "#fefbf6",
- "base-200": "#faedd6",
- success: "#37d399",
- error: "#f77272",
+ kilntheme: {
+ primary: "#ffffff",
+ "primary-content": "#161616",
+ secondary: "#ffffff",
+ "secondary-content": "#161616",
+ accent: "#ffffff",
+ "accent-content": "#161616",
+ neutral: "#000000",
+ "neutral-content": "#bebebe",
+ "base-100": "#ffffff",
+ "base-200": "#F5F5F5",
+ "base-300": "#bebebe",
+ "base-content": "#161616",
+ info: "#a6ffff",
+ "info-content": "#0a1616",
+ success: "#8effe4",
+ "success-content": "#071612",
+ warning: "#fff590",
+ "warning-content": "#161507",
+ error: "#ffd7c5",
+ "error-content": "#16110e",
},
},
],
diff --git a/checks.sh b/checks.sh
index d49a73a..2523f23 100755
--- a/checks.sh
+++ b/checks.sh
@@ -22,13 +22,15 @@ echo "No misspellings found"
echo "${headerStart}Web UI: format, lint, check${headerEnd}"
-changed_files=$(git diff --name-only)
+changed_files=$(git diff --name-only --staged)
if [[ "$changed_files" == *"app/web_ui/"* ]]; then
echo "${headerStart}Checking Web UI: format, lint, check${headerEnd}"
cd app/web_ui
npm run format_check
npm run lint
npm run check
+ echo "Running vite build"
+ npm run build > /dev/null
cd ../..
else
echo "Skipping Web UI: no files changed"
diff --git a/libs/core/kiln_ai/adapters/base_adapter.py b/libs/core/kiln_ai/adapters/base_adapter.py
index 22a8d2a..abe7931 100644
--- a/libs/core/kiln_ai/adapters/base_adapter.py
+++ b/libs/core/kiln_ai/adapters/base_adapter.py
@@ -2,8 +2,8 @@
from abc import ABCMeta, abstractmethod
from typing import Dict
+from kiln_ai.datamodel import Task
from kiln_ai.datamodel.json_schema import validate_schema
-from kiln_ai.datamodel.models import Task
class BaseAdapter(metaclass=ABCMeta):
diff --git a/libs/core/kiln_ai/adapters/langchain_adapters.py b/libs/core/kiln_ai/adapters/langchain_adapters.py
index 34e636c..0fef21b 100644
--- a/libs/core/kiln_ai/adapters/langchain_adapters.py
+++ b/libs/core/kiln_ai/adapters/langchain_adapters.py
@@ -1,6 +1,6 @@
from typing import Dict
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
from kiln_ai.adapters.prompt_builders import SimplePromptBuilder
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, SystemMessage
@@ -13,7 +13,7 @@
class LangChainPromptAdapter(BaseAdapter):
def __init__(
self,
- kiln_task: models.Task,
+ kiln_task: datamodel.Task,
custom_model: BaseChatModel | None = None,
model_name: str | None = None,
provider: str | None = None,
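For context only: a minimal sketch of how the updated constructor is used after the type annotation switches from models.Task to datamodel.Task, mirroring run_simple_task in the tests below. The model and provider names here are illustrative assumptions, not values taken from ml_model_list.

    import kiln_ai.datamodel as datamodel
    from kiln_ai.adapters.langchain_adapters import LangChainPromptAdapter

    async def run_example(task: datamodel.Task):
        # kiln_task is now typed as datamodel.Task rather than models.Task
        adapter = LangChainPromptAdapter(
            task,
            model_name="llama_3_1_8b",  # assumption: any built-in model name
            provider="ollama",          # assumption: any supported provider
        )
        return await adapter.invoke("2 + 2")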
diff --git a/libs/core/kiln_ai/adapters/test_prompt_adaptors.py b/libs/core/kiln_ai/adapters/test_prompt_adaptors.py
index 5fe04e2..c0d8a1a 100644
--- a/libs/core/kiln_ai/adapters/test_prompt_adaptors.py
+++ b/libs/core/kiln_ai/adapters/test_prompt_adaptors.py
@@ -1,7 +1,7 @@
import os
from pathlib import Path
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
import pytest
from kiln_ai.adapters.langchain_adapters import LangChainPromptAdapter
from kiln_ai.adapters.ml_model_list import built_in_models, ollama_online
@@ -95,30 +95,30 @@ async def test_all_built_in_models(tmp_path):
def build_test_task(tmp_path: Path):
- project = models.Project(name="test", path=tmp_path / "test.kiln")
+ project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
project.save_to_file()
assert project.name == "test"
- task = models.Task(parent=project, name="test task")
+ task = datamodel.Task(parent=project, name="test task")
task.save_to_file()
assert task.name == "test task"
task.instruction = (
"You are an assistant which performs math tasks provided in plain text."
)
- r1 = models.TaskRequirement(
+ r1 = datamodel.TaskRequirement(
parent=task,
name="BEDMAS",
instruction="You follow order of mathematical operation (BEDMAS)",
)
r1.save_to_file()
- r2 = models.TaskRequirement(
+ r2 = datamodel.TaskRequirement(
parent=task,
name="only basic math",
instruction="If the problem has anything other than addition, subtraction, multiplication, division, and brackets, you will not answer it. Reply instead with 'I'm just a basic calculator, I don't know how to do that'.",
)
r2.save_to_file()
- r3 = models.TaskRequirement(
+ r3 = datamodel.TaskRequirement(
parent=task,
name="Answer format",
instruction="The answer can contain any content about your reasoning, but at the end it should include the final answer in numerals in square brackets. For example if the answer is one hundred, the end of your response should be [100].",
@@ -133,7 +133,7 @@ async def run_simple_test(tmp_path: Path, model_name: str, provider: str | None
return await run_simple_task(task, model_name, provider)
-async def run_simple_task(task: models.Task, model_name: str, provider: str):
+async def run_simple_task(task: datamodel.Task, model_name: str, provider: str):
adapter = LangChainPromptAdapter(task, model_name=model_name, provider=provider)
answer = await adapter.invoke(
diff --git a/libs/core/kiln_ai/adapters/test_prompt_builders.py b/libs/core/kiln_ai/adapters/test_prompt_builders.py
index 218c1c6..f8525b1 100644
--- a/libs/core/kiln_ai/adapters/test_prompt_builders.py
+++ b/libs/core/kiln_ai/adapters/test_prompt_builders.py
@@ -4,7 +4,7 @@
from kiln_ai.adapters.prompt_builders import MultiShotPromptBuilder, SimplePromptBuilder
from kiln_ai.adapters.test_prompt_adaptors import build_test_task
from kiln_ai.adapters.test_structured_output import build_structured_output_test_task
-from kiln_ai.datamodel.models import (
+from kiln_ai.datamodel import (
Example,
ExampleOutput,
ExampleSource,
diff --git a/libs/core/kiln_ai/adapters/test_structured_output.py b/libs/core/kiln_ai/adapters/test_structured_output.py
index b66f83e..c9bf203 100644
--- a/libs/core/kiln_ai/adapters/test_structured_output.py
+++ b/libs/core/kiln_ai/adapters/test_structured_output.py
@@ -3,7 +3,7 @@
import jsonschema
import jsonschema.exceptions
-import kiln_ai.datamodel.models as models
+import kiln_ai.datamodel as datamodel
import pytest
from kiln_ai.adapters.base_adapter import BaseAdapter
from kiln_ai.adapters.langchain_adapters import LangChainPromptAdapter
@@ -51,7 +51,7 @@ async def test_structured_output_ollama_llama(tmp_path):
class MockAdapter(BaseAdapter):
- def __init__(self, kiln_task: models.Task, response: Dict | str | None):
+ def __init__(self, kiln_task: datamodel.Task, response: Dict | str | None):
super().__init__(kiln_task)
self.response = response
@@ -79,8 +79,8 @@ async def test_mock_unstructred_response(tmp_path):
answer = await adapter.invoke("You are a mock, send me the response!")
# Should error, expecting a string, not a dict
- project = models.Project(name="test", path=tmp_path / "test.kiln")
- task = models.Task(parent=project, name="test task")
+ project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
+ task = datamodel.Task(parent=project, name="test task")
task.instruction = (
"You are an assistant which performs math tasks provided in plain text."
)
@@ -112,9 +112,9 @@ async def test_all_built_in_models_structured_output(tmp_path):
def build_structured_output_test_task(tmp_path: Path):
- project = models.Project(name="test", path=tmp_path / "test.kiln")
+ project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
project.save_to_file()
- task = models.Task(
+ task = datamodel.Task(
parent=project,
name="test task",
instruction="You are an assistant which tells a joke, given a subject.",
@@ -148,9 +148,9 @@ async def run_structured_output_test(tmp_path: Path, model_name: str, provider:
def build_structured_input_test_task(tmp_path: Path):
- project = models.Project(name="test", path=tmp_path / "test.kiln")
+ project = datamodel.Project(name="test", path=tmp_path / "test.kiln")
project.save_to_file()
- task = models.Task(
+ task = datamodel.Task(
parent=project,
name="test task",
instruction="You are an assistant which classifies a triangle given the lengths of its sides. If all sides are of equal length, the triangle is equilateral. If two sides are equal, the triangle is isosceles. Otherwise, it is scalene.\n\nAt the end of your response return the result in double square brackets. It should be plain text. It should be exactly one of the three following strings: '[[equilateral]]', or '[[isosceles]]', or '[[scalene]]'.",
diff --git a/libs/core/kiln_ai/datamodel/models.py b/libs/core/kiln_ai/datamodel/__init__.py
similarity index 99%
rename from libs/core/kiln_ai/datamodel/models.py
rename to libs/core/kiln_ai/datamodel/__init__.py
index c8279a6..048250b 100644
--- a/libs/core/kiln_ai/datamodel/models.py
+++ b/libs/core/kiln_ai/datamodel/__init__.py
@@ -13,7 +13,7 @@
from .json_schema import validate_schema
if TYPE_CHECKING:
- from .models import Task
+ from . import Task
# Conventions:
# 1) Names are filename safe as they may be used as file names. They are informational and not to be used in prompts/training/validation.
diff --git a/libs/core/kiln_ai/datamodel/test_example_models.py b/libs/core/kiln_ai/datamodel/test_example_models.py
index 16d5ddd..74a70eb 100644
--- a/libs/core/kiln_ai/datamodel/test_example_models.py
+++ b/libs/core/kiln_ai/datamodel/test_example_models.py
@@ -1,7 +1,7 @@
import json
import pytest
-from kiln_ai.datamodel.models import (
+from kiln_ai.datamodel import (
Example,
ExampleOutput,
ExampleOutputSource,
diff --git a/libs/core/kiln_ai/datamodel/test_models.py b/libs/core/kiln_ai/datamodel/test_models.py
index 0b7ffd0..7bd828e 100644
--- a/libs/core/kiln_ai/datamodel/test_models.py
+++ b/libs/core/kiln_ai/datamodel/test_models.py
@@ -1,7 +1,7 @@
import json
import pytest
-from kiln_ai.datamodel.models import Priority, Project, Task, TaskDeterminism
+from kiln_ai.datamodel import Priority, Project, Task, TaskDeterminism
from kiln_ai.datamodel.test_json_schema import json_joke_schema
from pydantic import ValidationError