execute notebook as task via DefaultExecutionManager
andrii-i committed Sep 16, 2024
1 parent 2f6938b commit 3ae8997
Showing 2 changed files with 60 additions and 22 deletions.
54 changes: 32 additions & 22 deletions jupyter_scheduler/executors.py
@@ -3,6 +3,7 @@
import shutil
import tarfile
import traceback
import multiprocessing as mp
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Dict, List
@@ -15,9 +16,10 @@
from prefect.futures import as_completed
from prefect_dask.task_runners import DaskTaskRunner

from jupyter_scheduler.models import DescribeJob, JobFeature, Status
from jupyter_scheduler.models import CreateJob, DescribeJob, JobFeature, Status
from jupyter_scheduler.orm import Job, Workflow, create_session
from jupyter_scheduler.parameterize import add_parameters
from jupyter_scheduler.scheduler import Scheduler
from jupyter_scheduler.utils import get_utc_timestamp
from jupyter_scheduler.workflows import DescribeWorkflow

@@ -186,36 +188,44 @@ def on_complete_workflow(self):
class DefaultExecutionManager(ExecutionManager):
    """Default execution manager that executes notebooks"""

    @task(task_run_name="{task_id}")
    def execute_task(self, task_id: str):
        print(f"Task {task_id} executed")
        return task_id
    @task
    def execute_task(self, job: Job):
        with self.db_session() as session:
            staging_paths = Scheduler.get_staging_paths(DescribeJob.from_orm(job))

            execution_manager = DefaultExecutionManager(
                job_id=job.job_id,
                staging_paths=staging_paths,
                root_dir=self.root_dir,
                db_url=self.db_url,
            )
            execution_manager.process()

            job.pid = 1  # TODO: fix pid hardcode
            job_id = job.job_id

        return job_id

    @task
    def get_task_data(self, task_ids: List[str] = []):
        # TODO: get orm objects from Task table of the db, create DescribeTask for each
        tasks_data_obj = [
            {"id": "task0", "dependsOn": ["task3"]},
            {"id": "task4", "dependsOn": ["task0", "task1", "task2", "task3"]},
            {"id": "task1", "dependsOn": []},
            {"id": "task2", "dependsOn": ["task1"]},
            {"id": "task3", "dependsOn": ["task1", "task2"]},
        ]

        return tasks_data_obj
    def get_tasks_records(self, task_ids: List[str]) -> List[Job]:
        with self.db_session() as session:
            tasks = session.query(Job).filter(Job.job_id.in_(task_ids)).all()

        return tasks

    @flow
    def execute_workflow(self):
        tasks_info: List[Job] = self.get_tasks_records(self.model.tasks)
        tasks = {task.job_id: task for task in tasks_info}

        tasks_info = self.get_task_data()
        tasks = {task["id"]: task for task in tasks_info}

        # create Prefect tasks, use caching to ensure Prefect tasks are created before wait_for is called on them
        @lru_cache(maxsize=None)
        def make_task(task_id):
            deps = tasks[task_id]["dependsOn"]
            """Create a delayed object for the given task recursively creating delayed objects for all tasks it depends on"""
            deps = tasks[task_id].depends_on or []
            name = tasks[task_id].name
            job_id = tasks[task_id].job_id
            return self.execute_task.submit(
                task_id, wait_for=[make_task(dep_id) for dep_id in deps]
                tasks[task_id], wait_for=[make_task(dep_id) for dep_id in deps]
            )

        final_tasks = [make_task(task_id) for task_id in tasks]
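
For context, the dependency handling in execute_workflow above follows a common Prefect pattern: memoize task submission so each task is submitted exactly once, and pass the cached futures to wait_for from every downstream task. The sketch below is a minimal, self-contained illustration of that pattern and is not part of this commit; the step names and dependency map are hypothetical, and it assumes Prefect 2.x is installed.

from functools import lru_cache

from prefect import flow, task


@task
def run_step(step_id: str) -> str:
    # Stand-in for the per-task work (hypothetical); the commit's version
    # runs a notebook via DefaultExecutionManager instead.
    print(f"running {step_id}")
    return step_id


@flow
def run_pipeline():
    # Hypothetical task graph: step id -> ids of the steps it depends on.
    depends_on = {
        "prepare": [],
        "train": ["prepare"],
        "report": ["prepare", "train"],
    }

    # Memoize submissions so each step is submitted once even when it is
    # reached through several dependency chains, mirroring the lru_cache
    # usage in execute_workflow above.
    @lru_cache(maxsize=None)
    def submit(step_id: str):
        upstream = [submit(dep) for dep in depends_on[step_id]]
        return run_step.submit(step_id, wait_for=upstream)

    return [submit(step_id).result() for step_id in depends_on]


if __name__ == "__main__":
    run_pipeline()
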
28 changes: 28 additions & 0 deletions jupyter_scheduler/scheduler.py
@@ -521,6 +521,11 @@ def create_job(self, model: CreateJob, run: bool = True) -> str:
            if not run:
                return job.job_id

            job_id = self.run_job(job=job, staging_paths=staging_paths)
            return job_id

    def run_job(self, job: Job, staging_paths: Dict[str, str]) -> str:
        with self.db_session() as session:
            # The MP context forces new processes to not be forked on Linux.
            # This is necessary because `asyncio.get_event_loop()` is bugged in
            # forked processes in Python versions below 3.12. This method is
@@ -556,6 +561,7 @@ def create_workflow(self, model: CreateWorkflow) -> str:
    def run_workflow(self, workflow_id: str) -> str:
        execution_manager = self.execution_manager_class(
            workflow_id=workflow_id,
            root_dir=self.root_dir,
            db_url=self.db_url,
        )
        execution_manager.process_workflow()
@@ -858,6 +864,28 @@ def get_staging_paths(self, model: Union[DescribeJob, DescribeJobDefinition]) ->

        return staging_paths

    @staticmethod
    def get_staging_paths(model: Union[DescribeJob, DescribeJobDefinition]) -> Dict[str, str]:
        staging_paths = {}
        if not model:
            return staging_paths

        id = model.job_id if isinstance(model, DescribeJob) else model.job_definition_id

        for output_format in model.output_formats:
            filename = create_output_filename(
                model.input_filename, model.create_time, output_format
            )
            staging_paths[output_format] = os.path.join(
                os.path.join(jupyter_data_dir(), "scheduler_staging_area"), id, filename
            )

        staging_paths["input"] = os.path.join(
            os.path.join(jupyter_data_dir(), "scheduler_staging_area"), id, model.input_filename
        )

        return staging_paths

    async def stop_extension(self):
        """
        Cleanup code to run when the server is stopping.
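
To make the staging layout concrete: the new static get_staging_paths resolves every path under a per-job directory inside the scheduler staging area. Below is a hypothetical illustration of the resulting dict, not taken from the commit; the job id, file names, and timestamp are invented, and <data-dir> stands for whatever jupyter_data_dir() returns on the host.

# Hypothetical result of Scheduler.get_staging_paths(...) for a job with
# id "abc123", input "helloworld.ipynb", and output formats ["ipynb", "html"].
staging_paths = {
    "ipynb": "<data-dir>/scheduler_staging_area/abc123/helloworld-<timestamp>.ipynb",
    "html": "<data-dir>/scheduler_staging_area/abc123/helloworld-<timestamp>.html",
    "input": "<data-dir>/scheduler_staging_area/abc123/helloworld.ipynb",
}
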
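The comment shown in run_job above points at a known pitfall: forked child processes on Linux inherit asyncio state, and asyncio.get_event_loop() misbehaves in forked children on Python versions below 3.12, so the scheduler asks for a non-fork multiprocessing context. The snippet below is a minimal sketch of that technique, separate from the commit itself; run_notebook_job is a hypothetical stand-in for the real job body.

import asyncio
import multiprocessing as mp


def run_notebook_job():
    # Stand-in for the real job, which drives async notebook execution.
    asyncio.run(asyncio.sleep(0))
    print("job finished")


if __name__ == "__main__":
    # "spawn" starts a fresh interpreter, so the child process gets a clean
    # event-loop state instead of inheriting a forked one.
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=run_notebook_job)
    p.start()
    p.join()
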
