
fmt
baskaryan committed Jan 13, 2025
1 parent 1fca587 commit 88e6f63
Showing 2 changed files with 24 additions and 36 deletions.
python/langsmith/pytest_plugin.py (3 additions, 3 deletions)
@@ -66,8 +66,8 @@ class LangSmithPlugin:
 
     def __init__(self):
         """Initialize."""
-        from rich.console import Console
-        from rich.live import Live
+        from rich.console import Console  # type: ignore[import-not-found]
+        from rich.live import Live  # type: ignore[import-not-found]
 
         self.process_status = {}  # Track process status
         self.status_lock = Lock()  # Thread-safe updates
@@ -109,7 +109,7 @@ def pytest_runtest_logstart(self, nodeid):
 
     def generate_table(self):
         """Generate results table."""
-        from rich.table import Table
+        from rich.table import Table  # type: ignore[import-not-found]
 
         table = Table()
         table.add_column("Test")
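
Note on the change above: the # type: ignore[import-not-found] comments silence mypy when the optional rich dependency is not installed, since the plugin imports rich lazily inside its methods rather than at module import time. A minimal sketch of the same deferred-import pattern (the helper name and error message here are illustrative, not part of the plugin):

def _get_console():
    """Import rich lazily so it stays an optional dependency (hypothetical helper)."""
    try:
        from rich.console import Console  # type: ignore[import-not-found]
    except ImportError as exc:
        raise ImportError(
            "the LangSmith pytest renderer requires rich: pip install rich"
        ) from exc
    return Console()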
python/langsmith/testing/_internal.py (21 additions, 33 deletions)
@@ -503,8 +503,8 @@ def submit_result(
         run_id: uuid.UUID,
         error: Optional[str] = None,
         skipped: bool = False,
-        pytest_plugin=None,
-        pytest_nodeid=None,
+        pytest_plugin: Any = None,
+        pytest_nodeid: Any = None,
     ) -> None:
         self._executor.submit(
             self._submit_result,
@@ -520,33 +520,21 @@ def _submit_result(
         run_id: uuid.UUID,
         error: Optional[str] = None,
         skipped: bool = False,
-        pytest_plugin=None,
-        pytest_nodeid=None,
+        pytest_plugin: Any = None,
+        pytest_nodeid: Any = None,
     ) -> None:
-        if error:
-            if skipped:
-                self.client.create_feedback(
-                    run_id,
-                    key="pass",
-                    # Don't factor into aggregate score
-                    score=None,
-                    comment=f"Skipped: {repr(error)}",
-                )
-                status = "skipped"
-            else:
-                self.client.create_feedback(
-                    run_id, key="pass", score=0, comment=f"Error: {repr(error)}"
-                )
-                status = "failed"
+        if skipped:
+            score = None
+            status = "skipped"
+        elif error:
+            score = 0
+            status = "failed"
         else:
-            self.client.create_feedback(
-                run_id,
-                key="pass",
-                score=1,
-            )
+            score = 1
             status = "passed"
         if pytest_plugin and pytest_nodeid:
             pytest_plugin.update_process_status(pytest_nodeid, {"status": status})
+        self.client.create_feedback(run_id, key="pass", score=score)
 
     def sync_example(
         self,
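
The hunk above collapses three create_feedback call sites into one: the branches now only pick a score and a display status, and a single call reports the result (note that the consolidated call no longer attaches the skip/error text as a feedback comment). The mapping, isolated as a standalone sketch (pass_feedback is a hypothetical helper, not part of the SDK):

from typing import Optional, Tuple

def pass_feedback(error: Optional[str], skipped: bool) -> Tuple[Optional[int], str]:
    """Map a test outcome to its 'pass' score and display status.

    Skipped tests get score=None so they don't factor into the aggregate.
    """
    if skipped:
        return None, "skipped"
    elif error:
        return 0, "failed"
    return 1, "passed"

assert pass_feedback(None, False) == (1, "passed")
assert pass_feedback("boom", False) == (0, "failed")
assert pass_feedback("not supported", True) == (None, "skipped")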
@@ -579,6 +567,11 @@ def _sync_example(
         pytest_plugin: Any,
         pytest_nodeid: Any,
     ) -> None:
+        if pytest_plugin and pytest_nodeid:
+            update = {"inputs": inputs, "reference_outputs": outputs}
+            update = {k: v for k, v in update.items() if v is not None}
+            pytest_plugin.update_process_status(pytest_nodeid, update)
+
         inputs_ = _serde_example_values(inputs) if inputs else inputs
         outputs_ = _serde_example_values(outputs) if outputs else outputs
         try:
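
This hunk moves the pytest status update to the top of _sync_example (the next hunk deletes the same block from the end of the method), so inputs and reference outputs reach the terminal display even if the sync below raises. The comprehension simply drops unset fields; for example:

update = {"inputs": {"question": "2+2"}, "reference_outputs": None}
update = {k: v for k, v in update.items() if v is not None}
assert update == {"inputs": {"question": "2+2"}}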
@@ -608,11 +601,6 @@ def _sync_example(
         if example.modified_at:
             self.update_version(example.modified_at)
 
-        if pytest_plugin and pytest_nodeid:
-            update = {"inputs": inputs, "reference_outputs": outputs}
-            update = {k: v for k, v in update.items() if v is not None}
-            pytest_plugin.update_process_status(pytest_nodeid, update)
-
     def _submit_feedback(
         self, run_id: ID_TYPE, feedback: Union[dict, list], **kwargs: Any
     ):
@@ -626,17 +614,17 @@ def _create_feedback(
         self,
         run_id: ID_TYPE,
         feedback: dict,
-        pytest_plugin=None,
-        pytest_nodeid=None,
+        pytest_plugin: Any = None,
+        pytest_nodeid: Any = None,
         **kwargs: Any,
     ) -> None:
-        trace_id = self.client.read_run(run_id).trace_id
-        self.client.create_feedback(trace_id, **feedback, **kwargs)
         if pytest_plugin and pytest_nodeid:
             val = feedback["score"] if "score" in feedback else feedback["val"]
             pytest_plugin.update_process_status(
                 pytest_nodeid, {"feedback": {feedback["key"]: val}}
             )
+        trace_id = self.client.read_run(run_id).trace_id
+        self.client.create_feedback(trace_id, **feedback, **kwargs)
 
     def shutdown(self):
         self._executor.shutdown(wait=True)
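
The last hunk similarly reorders _create_feedback so the in-terminal feedback display updates before the read_run round trip to the server, presumably so results render sooner. The displayed value prefers "score" and falls back to "val"; a sketch of that lookup (feedback_value is a hypothetical name):

def feedback_value(feedback: dict):
    """Prefer a numeric 'score'; otherwise fall back to a generic 'val'."""
    return feedback["score"] if "score" in feedback else feedback["val"]

assert feedback_value({"key": "pass", "score": 1}) == 1
assert feedback_value({"key": "judgment", "val": "looks good"}) == "looks good"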
