Heuristic #9

Draft · wants to merge 53 commits into base: main

Commits (53)
70a5b54
Create heuristics module
Gistbatch Jan 30, 2024
7c6acf9
Create datastructures
Gistbatch Jan 30, 2024
9146c74
Add population initialization draft
Gistbatch Jan 30, 2024
7c296cd
Draft diversification
Gistbatch Jan 30, 2024
bab0701
Draft solution selection
Gistbatch Jan 30, 2024
04cdc08
Update type information
Gistbatch Jan 30, 2024
519dcba
Draft search algorithm
Gistbatch Jan 30, 2024
5d8623a
Draft schedule wrapper
Gistbatch Jan 30, 2024
5845ba2
Build initial test
Gistbatch Jan 30, 2024
42c5d90
Make test fail
Gistbatch Jan 30, 2024
3d872d6
Adapt greedy partitioning
Gistbatch Jan 30, 2024
7622fe0
Implement init schemes
Gistbatch Feb 5, 2024
1b7585a
Fix inits
Gistbatch Feb 5, 2024
3f7ad36
Fix imports
Gistbatch Feb 5, 2024
4573b1d
Implement schedule generation for initialization
Gistbatch Feb 6, 2024
8323c61
Remove unused import
Gistbatch Feb 6, 2024
101a61e
Update job types
Gistbatch Feb 6, 2024
1acad42
Add makespan helper type
Gistbatch Feb 6, 2024
2f64e0e
Update selection with makespan calculation
Gistbatch Feb 6, 2024
796f153
Fix Bin typehint
Gistbatch Feb 6, 2024
b095a75
Use circuit instead of job in initialize
Gistbatch Feb 6, 2024
2325244
Add diversification
Gistbatch Feb 6, 2024
c4f4049
Add docstrings
Gistbatch Feb 6, 2024
44adb74
Improve scatter search function
Gistbatch Feb 6, 2024
36559a3
Switch to CircuitJobs
Gistbatch Feb 6, 2024
dbb3fd6
Update schedule interface
Gistbatch Feb 6, 2024
9a9d052
Add meta params to interface for now
Gistbatch Feb 6, 2024
194966e
Change to kwargs
Gistbatch Feb 6, 2024
154f61f
Fix MakespanInfo circuit access
Gistbatch Feb 6, 2024
b3a14b3
Enable initialization methods
Gistbatch Feb 6, 2024
ef322e3
Fix makespan calculation
Gistbatch Feb 6, 2024
0b13a66
Update heuristic test
Gistbatch Feb 6, 2024
625d548
Add feasibility constraint
Gistbatch Feb 9, 2024
e13600e
Add targeted solution improvement
Gistbatch Feb 9, 2024
8d5d423
Add diversification based on distance
Gistbatch Feb 9, 2024
d238a40
Add update function to discard copies
Gistbatch Feb 9, 2024
2e9af4e
Fix combine solution with __eq__
Gistbatch Feb 12, 2024
5eca64b
Allow estimate for processing time bigger than backend
Gistbatch Feb 12, 2024
640e185
Fix random boundary
Gistbatch Feb 12, 2024
259681b
Parallelize initialization
Gistbatch Feb 12, 2024
89e8350
Parallelize search loop
Gistbatch Feb 12, 2024
0ce94cb
Update dependencies
Gistbatch Feb 15, 2024
a215dc0
Add some debug logging
Gistbatch Feb 15, 2024
9862096
Add temporary solution to run benchmarks
Gistbatch Feb 15, 2024
a2d518f
Create preliminary heuristics benchmark
Gistbatch Feb 15, 2024
cfc16e0
Add info logging for debugging
Gistbatch Feb 15, 2024
a182759
Add reinforcement learning module
Gistbatch Feb 15, 2024
c861fa1
Fix edge cases
Gistbatch Feb 15, 2024
38f8ef8
Disable test for feature branch
Gistbatch Feb 15, 2024
e1b74a4
Fix minor bugs and improve logging
Gistbatch Feb 15, 2024
0eda284
Change info to debug
Gistbatch Feb 15, 2024
bdb54e3
Prevent using infeasible solutions
Gistbatch Feb 15, 2024
416a857
Update ci.yml with MPI
Ectras Mar 6, 2024
1 change: 1 addition & 0 deletions .github/workflows/ci.yml
@@ -10,6 +10,7 @@ jobs:
python-version: ["3.10"]
steps:
- uses: actions/checkout@v3
- uses: mpi4py/setup-mpi@v1
- name: Setup PDM
uses: pdm-project/setup-pdm@v3
with:
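The only change to ci.yml installs MPI ahead of the Python setup, presumably because later commits parallelize the population initialization and the search loop. As a rough sketch of what mpi4py-based work splitting typically looks like (the names and the rank-based slicing below are illustrative assumptions, not taken from this PR):

# Illustrative mpi4py usage; the splitting scheme is an assumption, not this PR's implementation.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

candidate_solutions = list(range(128))        # stand-in for the population to evaluate
local = candidate_solutions[rank::size]       # each rank takes an interleaved slice
local_scores = [x * x for x in local]         # stand-in for the per-solution evaluation

gathered = comm.gather(local_scores, root=0)  # collect partial results on rank 0
if rank == 0:
    scores = [score for chunk in gathered for score in chunk]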
2 changes: 2 additions & 0 deletions data/benchmark/__init__.py
@@ -1,3 +1,5 @@
"""Benchmarking tools for the MILP model."""

from .benchmark import run_experiments
from .heuristic_benchmark import run_heuristic_experiments
from .processing import analyze_benchmarks
211 changes: 211 additions & 0 deletions data/benchmark/heuristic_benchmark.py
@@ -0,0 +1,211 @@
"""Generates the benchmark data."""

import logging

from mqt.bench import get_benchmark
from qiskit import QuantumCircuit
import numpy as np

from src.common import jobs_from_experiment
from src.provider import Accelerator
from src.scheduling import (
Benchmark,
InfoProblem,
PTimes,
Result,
SchedulerType,
STimes,
generate_schedule,
)
from src.scheduling.heuristics import (
generate_heuristic_info_schedule as heuristic_schedule,
)

from src.tools import cut_circuit
from utils.helpers import Timer


def _generate_batch(max_qubits: int, circuits_per_batch: int) -> list[QuantumCircuit]:
# Generate a batch of GHZ benchmark circuits with random sizes
batch = []
for _ in range(circuits_per_batch):
size = np.random.randint(2, max_qubits + 1)
circuit = get_benchmark(benchmark_name="ghz", level=1, circuit_size=size)
circuit.remove_final_measurements(inplace=True)
batch.append(circuit)

return batch


def run_heuristic_experiments(
circuits_per_batch: int,
settings: list[list[Accelerator]],
t_max: int,
num_batches: int,
) -> Benchmark:
"""Generates the benchmarks and executes scheduling."""
results: Benchmark = []
for setting in settings:
logging.info("New Setting started...")
max_size = sum(s.qubits for s in setting)
benchmarks = [
_generate_batch(max_size, circuits_per_batch) for _ in range(num_batches)
]
benchmark_results: list[Result] = []
for benchmark in benchmarks:
problme_circuits = _cut_circuits(benchmark, setting)
Reviewer comment (Contributor): problem_circuits (the variable name is misspelled).

logging.info("Setting up times...")

p_times = _get_benchmark_processing_times(problme_circuits, setting)
s_times = _get_benchmark_setup_times(
problme_circuits,
setting,
default_value=2**5,
)
logging.info("Setting up problems...")
problem = InfoProblem(
base_jobs=problme_circuits,
accelerators={str(acc.uuid): acc.qubits for acc in setting},
big_m=1000,
timesteps=t_max,
process_times=p_times,
setup_times=s_times,
)
result: dict[str, Result] = {}
logging.info("Running benchmark for setting.")
# Run the baseline model
with Timer() as t0:
makespan, jobs, _ = generate_schedule(problem, SchedulerType.BASELINE)
result["baseline"] = Result(makespan, jobs, t0.elapsed)
logging.info("Baseline model done: Makespan: %d.", makespan)
# Run the simple model
# if makespan > t_max:
# continue

# with Timer() as t1:
# makespan, jobs, _ = generate_schedule(problem, SchedulerType.SIMPLE)
# result["simple"] = Result(makespan, jobs, t1.elapsed)
# logging.info("Simple model done: Makespan: %d.", makespan)
# Run the heuristic model
with Timer() as t2:
# TODO convert ScheduledJob to JobResultInfo
makespan, jobs = heuristic_schedule(
benchmark, setting, num_iterations=128, partition_size=4, num_cores=1
)
result["heuristic"] = Result(makespan, jobs, t2.elapsed)
logging.info("Heuristic model done: Makespan: %d.", makespan)
# Store results
benchmark_results.append(result)
if len(benchmark_results) > 0:
results.append({"setting": setting, "benchmarks": benchmark_results})
return results
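
For orientation, a minimal sketch of how this entry point might be driven from a benchmark script; the Accelerator constructor arguments are assumptions for illustration and are not defined in this diff.

# Hypothetical driver; the Accelerator construction details are assumed.
from src.provider import Accelerator
from data.benchmark import run_heuristic_experiments

settings = [[Accelerator(qubits=5), Accelerator(qubits=20)]]  # assumed signature
benchmark = run_heuristic_experiments(
    circuits_per_batch=5,
    settings=settings,
    t_max=2**6,
    num_batches=3,
)
for entry in benchmark:
    # Each entry pairs a device setting with its per-batch baseline/heuristic results.
    print(entry["setting"], len(entry["benchmarks"]))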


Reviewer comment (Contributor): There is significant code duplication; _cut_circuits, _generate_partitions, and _partition_big_to_small are very similar to their counterparts in initialize.py.

def _cut_circuits(
circuits: list[QuantumCircuit], accelerators: list[Accelerator]
) -> list[QuantumCircuit]:
"""Cuts the circuits into smaller circuits."""
partitions = _generate_partitions(
[circuit.num_qubits for circuit in circuits], accelerators
)
logging.info(
"Partitions: generated: %s",
" ".join(str(partition) for partition in partitions),
)
jobs = []
logging.info("Cutting circuits...")
for idx, circuit in enumerate(circuits):
logging.info("Cutting circuit %d", idx)
if len(partitions[idx]) > 1:
experiments, _ = cut_circuit(circuit, partitions[idx])
jobs += [
job.circuit
for experiment in experiments
for job in jobs_from_experiment(experiment)
]
else:
# assumption for now: don't cut circuits that already fit on a single device
jobs.append(circuit)
return jobs


def _generate_partitions(
circuit_sizes: list[int], accelerators: list[Accelerator]
) -> list[list[int]]:
partitions = []
qpu_sizes = [acc.qubits for acc in accelerators]
num_qubits: int = sum(qpu_sizes)
for circuit_size in circuit_sizes:
if circuit_size > num_qubits:
partition = qpu_sizes.copy()  # copy so the shared qpu_sizes list is not mutated below
remaining_size = circuit_size - num_qubits
while remaining_size > num_qubits:
partition += qpu_sizes
remaining_size -= num_qubits
if remaining_size == 1:
partition[-1] = partition[-1] - 1
partition.append(2)
else:
partition += _partition_big_to_small(remaining_size, accelerators)
partitions.append(partition)
elif circuit_size > max(qpu_sizes):
partition = _partition_big_to_small(circuit_size, accelerators)
partitions.append(partition)
else:
partitions.append([circuit_size])
return partitions
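
For illustration: with a hypothetical 7-qubit and a 5-qubit device, a 30-qubit circuit would be partitioned as [7, 5, 7, 5, 6], an 8-qubit circuit as [6, 2] (avoiding a 1-qubit remainder), and a 4-qubit circuit as [4] (no cut needed).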


def _partition_big_to_small(size: int, accelerators: list[Accelerator]) -> list[int]:
partition = []
for qpu in sorted(accelerators, key=lambda a: a.qubits, reverse=True):
take_qubits = min(size, qpu.qubits)
if size - take_qubits == 1:
# We can't have a partition of size 1
# So in this case we take one qubit less to leave a partition of two
take_qubits -= 1
partition.append(take_qubits)
size -= take_qubits
if size == 0:
break
else:
raise ValueError(
"Circuit is too big to fit onto the devices,"
+ f" {size} qubits left after partitioning."
)
return partition
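
One way to address the reviewer's duplication remark above would be to move the shared helpers into a common module that both heuristic_benchmark.py and initialize.py import. A sketch, with a hypothetical module path:

# Suggested shared module, e.g. src/scheduling/partitioning.py (path is hypothetical).
from src.provider import Accelerator


def partition_big_to_small(size: int, accelerators: list[Accelerator]) -> list[int]:
    """Greedily fill the largest devices first, never leaving a 1-qubit remainder."""
    partition: list[int] = []
    for qpu in sorted(accelerators, key=lambda a: a.qubits, reverse=True):
        take_qubits = min(size, qpu.qubits)
        if size - take_qubits == 1:
            # A 1-qubit partition is not allowed, so take one less and leave a pair.
            take_qubits -= 1
        partition.append(take_qubits)
        size -= take_qubits
        if size == 0:
            break
    else:
        raise ValueError(
            f"Circuit is too big to fit onto the devices, {size} qubits left after partitioning."
        )
    return partition

Both call sites could then drop their private copies.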


def _get_benchmark_processing_times(
base_jobs: list[QuantumCircuit],
accelerators: list[Accelerator],
) -> PTimes:
return [
[accelerator.compute_processing_time(job) for accelerator in accelerators]
for job in base_jobs
]


def _get_benchmark_setup_times(
base_jobs: list[QuantumCircuit],
accelerators: list[Accelerator],
default_value: float,
) -> STimes:
return [
[
[
(
default_value
if id_i in [id_j, 0]
else (
0
if job_j is None
else accelerator.compute_setup_time(job_i, job_j)
)
)
for accelerator in accelerators
]
for id_i, job_i in enumerate([None] + base_jobs)
]
for id_j, job_j in enumerate([None] + base_jobs)
]
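
The triple comprehension above is dense. For readability, the following loop-based restatement builds the same matrix; it is a sketch for comparison only and is not part of the diff.

# Loop-based restatement of _get_benchmark_setup_times (readability sketch only).
def _get_benchmark_setup_times_explicit(
    base_jobs: list[QuantumCircuit],
    accelerators: list[Accelerator],
    default_value: float,
) -> STimes:
    padded = [None] + base_jobs  # index 0 is the "no job" placeholder
    times = []
    for id_j, job_j in enumerate(padded):
        row = []
        for id_i, job_i in enumerate(padded):
            per_accelerator = []
            for accelerator in accelerators:
                if id_i == id_j or id_i == 0:
                    # Same index or the placeholder at position 0: use the default value.
                    per_accelerator.append(default_value)
                elif job_j is None:
                    # No real predecessor: the original comprehension returns 0 here.
                    per_accelerator.append(0)
                else:
                    per_accelerator.append(accelerator.compute_setup_time(job_i, job_j))
            row.append(per_accelerator)
        times.append(row)
    return times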