From b98d142feebda5dcba9bb90cb36179c3bc719a1d Mon Sep 17 00:00:00 2001 From: MichaelBroughton Date: Thu, 16 May 2024 09:35:27 -0700 Subject: [PATCH 1/7] [WIP] Upgrade to tf2.15. (#809) * [WIP] Upgrade to tf2.15. * format and wheel test fixes. * Version bump again for wheel test. * Back to python3.10 * remove setup.py deps. * more format fixes. * yet more format. * C++ code formats. * Tutorial fix p1. * Tutorial fix p2. * Tutorials p3. * Add updated install instructions and tutorial install versions. * final cleanup * add setup.py --------- Co-authored-by: root --- .bazelversion | 3 +- .github/workflows/ci.yaml | 12 +- WORKSPACE | 37 +- configure.sh | 30 +- docs/install.md | 33 +- docs/tutorials/barren_plateaus.ipynb | 2 +- docs/tutorials/gradients.ipynb | 2 +- docs/tutorials/hello_many_worlds.ipynb | 4 +- docs/tutorials/mnist.ipynb | 2 +- docs/tutorials/noise.ipynb | 2 +- docs/tutorials/qcnn.ipynb | 4 +- docs/tutorials/quantum_data.ipynb | 2 +- .../quantum_reinforcement_learning.ipynb | 2 +- docs/tutorials/research_tools.ipynb | 6 +- release/setup.py | 10 +- requirements.txt | 17 +- scripts/ci_install.sh | 4 +- tensorflow_quantum/core/ops/BUILD | 543 +++++++++--------- .../core/ops/batch_util_test.py | 1 + .../core/ops/circuit_execution_ops.py | 26 +- .../core/ops/circuit_execution_ops_test.py | 29 +- tensorflow_quantum/core/ops/cirq_ops.py | 51 +- tensorflow_quantum/core/ops/cirq_ops_test.py | 10 +- tensorflow_quantum/core/ops/math_ops/BUILD | 90 +-- .../core/ops/math_ops/fidelity_op_test.py | 1 + .../ops/math_ops/inner_product_grad_test.py | 1 + .../ops/math_ops/inner_product_op_test.py | 1 + .../core/ops/math_ops/simulate_mps_test.py | 1 + .../core/ops/math_ops/tfq_inner_product.cc | 5 + .../ops/math_ops/tfq_inner_product_grad.cc | 5 + .../math_ops/tfq_simulate_1d_expectation.cc | 5 + .../tfq_simulate_1d_sampled_expectation.cc | 5 + tensorflow_quantum/core/ops/noise/BUILD | 82 ++- .../ops/noise/noisy_expectation_op_test.py | 1 + .../noisy_sampled_expectation_op_test.py | 1 + .../core/ops/noise/noisy_samples_op_test.py | 1 + .../core/ops/noise/tfq_noisy_expectation.cc | 5 + .../noise/tfq_noisy_sampled_expectation.cc | 5 + .../core/ops/tfq_adj_grad_op.cc | 10 + .../core/ops/tfq_adj_grad_op_test.py | 1 + .../core/ops/tfq_ps_util_ops_test.py | 1 + .../core/ops/tfq_simulate_expectation_op.cc | 5 + .../core/ops/tfq_simulate_ops_test.py | 1 + .../tfq_simulate_sampled_expectation_op.cc | 5 + .../core/ops/tfq_unitary_op_test.py | 1 + .../core/ops/tfq_utility_ops_test.py | 1 + .../core/serialize/op_deserializer.py | 8 +- .../core/serialize/op_deserializer_test.py | 6 +- .../core/serialize/op_serializer.py | 10 +- .../core/serialize/op_serializer_test.py | 16 +- .../core/serialize/serializable_gate_set.py | 26 +- .../serialize/serializable_gate_set_test.py | 1 + .../core/serialize/serializer_test.py | 31 +- tensorflow_quantum/core/src/BUILD | 6 + .../core/src/circuit_parser_qsim.cc | 18 +- .../core/src/circuit_parser_qsim_test.cc | 39 +- .../datasets/cluster_state_test.py | 1 + .../datasets/spin_system_test.py | 1 + .../python/differentiators/adjoint_test.py | 1 + .../differentiators/differentiator_test.py | 1 + .../python/differentiators/gradient_test.py | 1 + .../linear_combination_test.py | 1 + .../differentiators/parameter_shift_test.py | 1 + .../differentiators/parameter_shift_util.py | 8 +- .../parameter_shift_util_test.py | 1 + .../circuit_construction/elementary_test.py | 1 + .../circuit_executors/expectation_test.py | 1 + .../circuit_executors/input_checks_test.py | 1 + 
.../layers/circuit_executors/sample_test.py | 1 + .../sampled_expectation_test.py | 1 + .../layers/circuit_executors/state_test.py | 1 + .../layers/circuit_executors/unitary_test.py | 1 + .../layers/high_level/controlled_pqc_test.py | 1 + .../high_level/noisy_controlled_pqc_test.py | 1 + .../python/layers/high_level/noisy_pqc.py | 22 +- .../layers/high_level/noisy_pqc_test.py | 1 + .../python/layers/high_level/pqc.py | 22 +- .../python/layers/high_level/pqc_test.py | 1 + .../python/optimizers/rotosolve_minimizer.py | 4 +- .../optimizers/rotosolve_minimizer_test.py | 3 +- .../python/optimizers/spsa_minimizer_test.py | 3 +- .../python/quantum_context_test.py | 1 + tensorflow_quantum/python/util.py | 5 +- tensorflow_quantum/python/util_test.py | 1 + third_party/tf/qsim.patch | 75 +++ third_party/tf/tf.patch | 74 +++ 86 files changed, 824 insertions(+), 640 deletions(-) create mode 100644 third_party/tf/qsim.patch create mode 100644 third_party/tf/tf.patch diff --git a/.bazelversion b/.bazelversion index 03f488b07..f3c238740 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1,2 @@ -5.3.0 +6.5.0 +# NOTE: Update Bazel version in tensorflow/tools/ci_build/release/common.sh.oss \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 43f043e98..0b9ca153c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Lint tools run: pip install --upgrade pip setuptools; pip install -r requirements.txt; @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Format tools run: pip install --upgrade pip setuptools; pip install -r requirements.txt; sudo apt-get install -y clang-format-6.0 @@ -41,7 +41,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Bazel on CI run: ./scripts/ci_install.sh @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Bazel on CI run: ./scripts/ci_install.sh @@ -79,7 +79,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Bazel on CI run: ./scripts/ci_install.sh @@ -97,7 +97,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install notebook dependencies run: pip install --upgrade pip seaborn==0.10.0 diff --git a/WORKSPACE b/WORKSPACE index b47e25d2f..5f0025e9a 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,10 +1,13 @@ # This file includes external dependencies that are required to compile the # TensorFlow op. 
+ + load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -EIGEN_COMMIT = "3bb6a48d8c171cf20b5f8e48bfb4e424fbd4f79e" -EIGEN_SHA256 = "eca9847b3fe6249e0234a342b78f73feec07d29f534e914ba5f920f3e09383a3" + + +EIGEN_COMMIT = "aa6964bf3a34fd607837dd8123bc42465185c4f8" http_archive( @@ -16,7 +19,6 @@ cc_library( visibility = ["//visibility:public"], ) """, - sha256 = EIGEN_SHA256, strip_prefix = "eigen-{commit}".format(commit = EIGEN_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/{commit}/eigen-{commit}.tar.gz".format(commit = EIGEN_COMMIT), @@ -27,35 +29,41 @@ cc_library( http_archive( name = "qsim", sha256 = "b9c1eba09a885a938b5e73dfc2e02f5231cf3b01d899415caa24769346a731d5", + # patches = [ + # "//third_party/tf:qsim.patch", + # ], strip_prefix = "qsim-0.13.3", urls = ["https://github.com/quantumlib/qsim/archive/refs/tags/v0.13.3.zip"], ) http_archive( name = "org_tensorflow", - sha256 = "e52cda3bae45f0ae0fccd4055e9fa29892b414f70e2df94df9a3a10319c75fff", - strip_prefix = "tensorflow-2.11.0", + patches = [ + "//third_party/tf:tf.patch", + ], + # sha256 = "e52cda3bae45f0ae0fccd4055e9fa29892b414f70e2df94df9a3a10319c75fff", + strip_prefix = "tensorflow-2.15.0", urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.11.0.zip", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.zip", ], ) -load("@org_tensorflow//tensorflow:workspace3.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") -workspace() +tf_workspace3() -load("@org_tensorflow//tensorflow:workspace2.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2") -workspace() +tf_workspace2() -load("@org_tensorflow//tensorflow:workspace1.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace1.bzl", "tf_workspace1") -workspace() +tf_workspace1() -load("@org_tensorflow//tensorflow:workspace0.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0") -workspace() +tf_workspace0() load("//third_party/tf:tf_configure.bzl", "tf_configure") @@ -72,4 +80,3 @@ bind( name = "six", actual = "@six_archive//:six", ) - diff --git a/configure.sh b/configure.sh index ff42047fe..36e2d08a6 100755 --- a/configure.sh +++ b/configure.sh @@ -73,21 +73,21 @@ done # Check if it's installed -if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then - echo 'Using installed tensorflow' -else - # Uninstall CPU version if it is installed. - if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - pip uninstall tensorflow - elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - pip uninstall tf-nightly - fi - # Install GPU version - echo 'Installing tensorflow .....\n' - pip install tensorflow -fi +# if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then +# echo 'Using installed tensorflow' +# else +# # Uninstall CPU version if it is installed. +# if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then +# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' +# pip uninstall tensorflow +# elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then +# echo 'Already have tensorflow non-gpu installed. 
Uninstalling......\n' +# pip uninstall tf-nightly +# fi +# # Install GPU version +# echo 'Installing tensorflow .....\n' +# pip install tensorflow +# fi diff --git a/docs/install.md b/docs/install.md index 3de77ecf9..61338735c 100644 --- a/docs/install.md +++ b/docs/install.md @@ -10,14 +10,14 @@ There are a few ways to set up your environment to use TensorFlow Quantum (TFQ): Python's pip package manager. * Or build TensorFlow Quantum from source. -TensorFlow Quantum is supported on Python 3.7, 3.8, and 3.9 and depends directly on [Cirq](https://github.com/quantumlib/Cirq). +TensorFlow Quantum is supported on Python 3.9, 3.10, and 3.11 and depends directly on [Cirq](https://github.com/quantumlib/Cirq). ## Pip package ### Requirements -* pip 19.0 or later (requires `manylinux2010` support) -* [TensorFlow == 2.11.0](https://www.tensorflow.org/install/pip) +* pip 19.0 or later (requires `manylinux2014` support) +* [TensorFlow == 2.15.0](https://www.tensorflow.org/install/pip) See the [TensorFlow install guide](https://www.tensorflow.org/install/pip) to set up your Python development environment and an (optional) virtual environment. @@ -27,7 +27,7 @@ Upgrade `pip` and install TensorFlow
   pip3 install --upgrade pip
-  pip3 install tensorflow==2.11.0
+  pip3 install tensorflow==2.15.0
 
@@ -57,13 +57,13 @@ The following steps are tested for Ubuntu-like systems.
 
 ### 1. Set up a Python 3 development environment
 
-First we need the Python 3.8 development tools.
+First we need the Python 3.10 development tools.
   sudo apt update
-  sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3.8
-  sudo apt install python3.8 python3.8-dev python3.8-venv python3-pip
-  python3.8 -m pip install --upgrade pip
+  sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3.10
+  sudo apt install python3.10 python3.10-dev python3.10-venv python3-pip
+  python3.10 -m pip install --upgrade pip
 
@@ -72,7 +72,7 @@ First we need the Python 3.8 development tools.
 
 Go to your workspace directory and make a virtual environment for TFQ development.
-  python3.8 -m venv quantum_env
+  python3.10 -m venv quantum_env
   source quantum_env/bin/activate
 
@@ -84,21 +84,20 @@ As noted in the TensorFlow guide, the Bazel build system will be required.
-Our latest source builds use TensorFlow 2.11.0. To ensure compatibility we use `bazel` version 5.3.0. To remove any existing version of Bazel:
-
+Our latest source builds use TensorFlow 2.15.0. To ensure compatibility we use `bazel` version 6.5.0. To remove any existing version of Bazel:
   sudo apt-get remove bazel
 
-Download and install `bazel` version 5.3.0:
+Download and install `bazel` version 6.5.0:
-  wget https://github.com/bazelbuild/bazel/releases/download/5.3.0/bazel_5.3.0-linux-x86_64.deb
+  wget https://github.com/bazelbuild/bazel/releases/download/6.5.0/bazel_6.5.0-linux-x86_64.deb
 
-  sudo dpkg -i bazel_5.3.0-linux-x86_64.deb
+  sudo dpkg -i bazel_6.5.0-linux-x86_64.deb
 
@@ -122,7 +121,7 @@ Finally, confirm installation of the correct `bazel` version:
 ### 4. Build TensorFlow from source
 
 Here we adapt instructions from the TensorFlow [build from source](https://www.tensorflow.org/install/source)
-guide, see the link for further details. TensorFlow Quantum is compatible with TensorFlow version 2.11.0.
+guide, see the link for further details. TensorFlow Quantum is compatible with TensorFlow version 2.15.0.
 
 Download the TensorFlow source code:
 
@@ -131,7 +130,7 @@ Download the
   git clone https://github.com/tensorflow/tensorflow.git
   cd tensorflow
-  git checkout v2.11.0
+  git checkout v2.15.0
 
Be sure the virtual environment you created in step 2 is activated. Then, install the TensorFlow dependencies: @@ -141,7 +140,7 @@ Be sure the virtual environment you created in step 2 is activated. Then, instal pip install -U pip six numpy wheel setuptools mock 'future>=0.17.1' pip install -U keras_applications --no-deps pip install -U keras_preprocessing --no-deps - pip install numpy==1.24.2 + pip install numpy==1.23.5 pip install packaging requests diff --git a/docs/tutorials/barren_plateaus.ipynb b/docs/tutorials/barren_plateaus.ipynb index 3c9176eaa..dfbac0fb2 100644 --- a/docs/tutorials/barren_plateaus.ipynb +++ b/docs/tutorials/barren_plateaus.ipynb @@ -97,7 +97,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { diff --git a/docs/tutorials/gradients.ipynb b/docs/tutorials/gradients.ipynb index 072718bcf..fa0525475 100644 --- a/docs/tutorials/gradients.ipynb +++ b/docs/tutorials/gradients.ipynb @@ -99,7 +99,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { diff --git a/docs/tutorials/hello_many_worlds.ipynb b/docs/tutorials/hello_many_worlds.ipynb index 229136219..d069e3455 100644 --- a/docs/tutorials/hello_many_worlds.ipynb +++ b/docs/tutorials/hello_many_worlds.ipynb @@ -103,7 +103,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { @@ -255,7 +255,7 @@ "# Create a circuit on these qubits using the parameters you created above.\n", "circuit = cirq.Circuit(\n", " cirq.rx(a).on(q0),\n", - " cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))\n", + " cirq.ry(b).on(q1), cirq.CNOT(q0, q1))\n", "\n", "SVGCircuit(circuit)" ] diff --git a/docs/tutorials/mnist.ipynb b/docs/tutorials/mnist.ipynb index 91405ed26..4cccc4b5b 100644 --- a/docs/tutorials/mnist.ipynb +++ b/docs/tutorials/mnist.ipynb @@ -97,7 +97,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { diff --git a/docs/tutorials/noise.ipynb b/docs/tutorials/noise.ipynb index 0a0ebc290..0c79ff02e 100644 --- a/docs/tutorials/noise.ipynb +++ b/docs/tutorials/noise.ipynb @@ -83,7 +83,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2" ] }, { diff --git a/docs/tutorials/qcnn.ipynb b/docs/tutorials/qcnn.ipynb index f53182701..bc5da0a44 100644 --- a/docs/tutorials/qcnn.ipynb +++ b/docs/tutorials/qcnn.ipynb @@ -105,7 +105,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { @@ -554,7 +554,7 @@ " source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])\n", " pool_circuit.append(sink_basis_selector)\n", " pool_circuit.append(source_basis_selector)\n", - " pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))\n", + " pool_circuit.append(cirq.CNOT(source_qubit, sink_qubit))\n", " pool_circuit.append(sink_basis_selector**-1)\n", " return pool_circuit" ] diff --git a/docs/tutorials/quantum_data.ipynb b/docs/tutorials/quantum_data.ipynb index 9e78b9493..5e6e10fdb 100644 --- a/docs/tutorials/quantum_data.ipynb +++ b/docs/tutorials/quantum_data.ipynb @@ -111,7 +111,7 @@ } ], "source": [ - "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2" ] }, { diff --git a/docs/tutorials/quantum_reinforcement_learning.ipynb 
b/docs/tutorials/quantum_reinforcement_learning.ipynb index fba0291e6..ef3d3d0dc 100644 --- a/docs/tutorials/quantum_reinforcement_learning.ipynb +++ b/docs/tutorials/quantum_reinforcement_learning.ipynb @@ -123,7 +123,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0" + "!pip install tensorflow==2.15.0" ] }, { diff --git a/docs/tutorials/research_tools.ipynb b/docs/tutorials/research_tools.ipynb index 538fcf46c..8efd7a89c 100644 --- a/docs/tutorials/research_tools.ipynb +++ b/docs/tutorials/research_tools.ipynb @@ -83,7 +83,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2 tensorboard_plugin_profile==2.4.0" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2 tensorboard_plugin_profile==2.4.0" ] }, { @@ -155,8 +155,8 @@ "source": [ "def generate_circuit(qubits):\n", " \"\"\"Generate a random circuit on qubits.\"\"\"\n", - " random_circuit = cirq.generate_boixo_2018_supremacy_circuits_v2(\n", - " qubits, cz_depth=2, seed=1234)\n", + " random_circuit = cirq.experiments.random_rotations_between_grid_interaction_layers_circuit(\n", + " qubits, depth=2)\n", " return random_circuit\n", "\n", "def generate_data(circuit, n_samples):\n", diff --git a/release/setup.py b/release/setup.py index 24424b613..d02d30650 100644 --- a/release/setup.py +++ b/release/setup.py @@ -50,15 +50,11 @@ def finalize_options(self): self.install_lib = self.install_platlib -REQUIRED_PACKAGES = [ - 'cirq-core==0.13.1', 'cirq-google>=0.13.1', 'sympy == 1.8', - 'googleapis-common-protos==1.52.0', 'google-api-core==1.21.0', - 'google-auth==1.18.0', 'protobuf==3.19.4' -] +REQUIRED_PACKAGES = ['cirq-core==1.3.0', 'cirq-google==1.3.0', 'sympy == 1.12'] # placed as extra to not have required overwrite existing nightly installs if # they exist. -EXTRA_PACKAGES = ['tensorflow == 2.11.0'] +EXTRA_PACKAGES = ['tensorflow == 2.15.0'] CUR_VERSION = '0.7.3' @@ -107,6 +103,8 @@ def has_ext_modules(self): 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Scientific/Engineering :: Mathematics', diff --git a/requirements.txt b/requirements.txt index ae8700878..d0fc187eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,8 @@ -cirq-core==0.13.1 -cirq-google==0.13.1 -sympy==1.8 +cirq-core==1.3.0 +cirq-google==1.3.0 +sympy==1.12 numpy==1.24.2 # TensorFlow can detect if it was built against other versions. nbformat==4.4.0 pylint==2.4.4 -yapf==0.28.0 -tensorflow==2.11.0 -# Needed for compatibility with cirq program protos. -googleapis-common-protos==1.52.0 -google-api-core==1.21.0 -google-auth==1.18.0 -google-api-python-client==1.8.0 -grpcio==1.34.1 -protobuf==3.19.4 +yapf==0.40.2 +tensorflow==2.15.0 diff --git a/scripts/ci_install.sh b/scripts/ci_install.sh index 04e6b3159..28c58c2d7 100755 --- a/scripts/ci_install.sh +++ b/scripts/ci_install.sh @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -wget https://github.com/bazelbuild/bazel/releases/download/5.3.0/bazel_5.3.0-linux-x86_64.deb -sudo dpkg -i bazel_5.3.0-linux-x86_64.deb +wget https://github.com/bazelbuild/bazel/releases/download/6.5.0/bazel_6.5.0-linux-x86_64.deb +sudo dpkg -i bazel_6.5.0-linux-x86_64.deb pip install --upgrade pip setuptools wheel pip install -r requirements.txt \ No newline at end of file diff --git a/tensorflow_quantum/core/ops/BUILD b/tensorflow_quantum/core/ops/BUILD index 8087a8d3b..504cc2657 100644 --- a/tensorflow_quantum/core/ops/BUILD +++ b/tensorflow_quantum/core/ops/BUILD @@ -7,10 +7,10 @@ licenses(["notice"]) # Export for the PIP package. exports_files(["__init__.py"]) -config_setting( - name = "windows", - constraint_values = ["@bazel_tools//platforms:windows"], -) +# config_setting( +# name = "windows", +# constraint_values = ["@bazel_tools//platforms:windows"], +# ) py_library( name = "ops", @@ -38,45 +38,45 @@ cc_binary( srcs = [ "tfq_adj_grad_op.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ ":parse_context", @@ -101,45 +101,45 @@ cc_binary( "tfq_ps_symbol_replace_op.cc", "tfq_ps_weights_from_symbols_op.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - 
"//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ ":parse_context", @@ -159,45 +159,45 @@ cc_binary( "tfq_simulate_samples_op.cc", "tfq_simulate_state_op.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ ":parse_context", @@ -225,45 +225,45 @@ cc_binary( "tfq_circuit_append_op.cc", "tfq_resolve_parameters_op.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - 
"/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ ":parse_context", @@ -273,8 +273,10 @@ cc_binary( "//tensorflow_quantum/core/src:program_resolution", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/functional:any_invocable", "@com_google_absl//absl/types:optional", "@com_google_absl//absl/types:span", + "@com_google_absl//absl/status:statusor", "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", ], @@ -284,41 +286,41 @@ cc_library( name = "parse_context", srcs = ["parse_context.cc"], hdrs = ["parse_context.h"], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), deps = [ ":tfq_simulate_utils", "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", @@ -327,6 +329,8 @@ cc_library( 
"//tensorflow_quantum/core/src:program_resolution", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/functional:any_invocable", + "@com_google_absl//absl/status:statusor", "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", ], @@ -337,45 +341,45 @@ cc_binary( srcs = [ "tfq_calculate_unitary_op.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ ":parse_context", @@ -387,6 +391,7 @@ cc_binary( "//tensorflow_quantum/core/src:util_qsim", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/functional:any_invocable", "@com_google_absl//absl/types:optional", "@com_google_absl//absl/types:span", "@local_config_tf//:libtensorflow_framework", @@ -399,41 +404,41 @@ cc_library( name = "tfq_simulate_utils", srcs = ["tfq_simulate_utils.cc"], hdrs = ["tfq_simulate_utils.h"], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # 
"/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), deps = [ "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", diff --git a/tensorflow_quantum/core/ops/batch_util_test.py b/tensorflow_quantum/core/ops/batch_util_test.py index 6b11becd7..074b01fcb 100644 --- a/tensorflow_quantum/core/ops/batch_util_test.py +++ b/tensorflow_quantum/core/ops/batch_util_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops.py b/tensorflow_quantum/core/ops/circuit_execution_ops.py index 1158e98e4..d82a1c83d 100644 --- a/tensorflow_quantum/core/ops/circuit_execution_ops.py +++ b/tensorflow_quantum/core/ops/circuit_execution_ops.py @@ -24,10 +24,12 @@ class TFQStateVectorSimulator(enum.Enum): """Enum to make specifying TFQ simulators user-friendly.""" + # pylint: disable=invalid-name expectation = tfq_simulate_ops.tfq_simulate_expectation samples = tfq_simulate_ops.tfq_simulate_samples state = tfq_simulate_ops.tfq_simulate_state sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation + # pylint: enable=invalid-name def _check_quantum_concurrent(quantum_concurrent): @@ -37,9 +39,9 @@ def _check_quantum_concurrent(quantum_concurrent): def get_expectation_op( - backend=None, - *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + backend=None, + *, + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): """Get a TensorFlow op that will calculate batches of expectation values. This function produces a non-differentiable TF op that will calculate @@ -150,9 +152,9 @@ def get_expectation_op( def get_sampling_op( - backend=None, - *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + backend=None, + *, + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): """Get a Tensorflow op that produces samples from given quantum circuits. This function produces a non-differentiable op that will calculate @@ -242,9 +244,9 @@ def get_sampling_op( def get_state_op( - backend=None, - *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + backend=None, + *, + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): """Get a TensorFlow op that produces states from given quantum circuits. This function produces a non-differentiable op that will calculate @@ -332,9 +334,9 @@ def get_state_op( def get_sampled_expectation_op( - backend=None, - *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + backend=None, + *, + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): """Get a TensorFlow op that will calculate sampled expectation values. 
This function produces a non-differentiable TF op that will calculate diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py index f94297cdc..f89349d70 100644 --- a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py +++ b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py @@ -16,17 +16,16 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position -from unittest import mock import numpy as np import tensorflow as tf from absl.testing import parameterized from scipy import stats import cirq -import cirq_google from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops from tensorflow_quantum.python import util @@ -95,13 +94,6 @@ def test_get_expectation_inputs(self): circuit_execution_ops.get_expectation_op( backend=cirq.DensityMatrixSimulator()) circuit_execution_ops.get_expectation_op() - with self.assertRaisesRegex(NotImplementedError, - expected_regex='Sample-based'): - mock_engine = mock.Mock() - circuit_execution_ops.get_expectation_op( - cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) with self.assertRaisesRegex( TypeError, expected_regex="cirq.sim.simulator.SimulatesExpectationValues"): @@ -118,11 +110,6 @@ def test_get_sampled_expectation_inputs(self): backend=cirq.Simulator()) circuit_execution_ops.get_sampled_expectation_op( backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() - circuit_execution_ops.get_sampled_expectation_op( - cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"): circuit_execution_ops.get_sampled_expectation_op(backend="junk") @@ -137,11 +124,6 @@ def test_get_samples_inputs(self): circuit_execution_ops.get_sampling_op(backend=cirq.Simulator()) circuit_execution_ops.get_sampling_op( backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() - circuit_execution_ops.get_sampling_op( - backend=cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) with self.assertRaisesRegex(TypeError, expected_regex="Expected a Cirq.Sampler"): circuit_execution_ops.get_sampling_op(backend="junk") @@ -159,15 +141,6 @@ def test_get_state_inputs(self): with self.assertRaisesRegex(TypeError, expected_regex="Cirq.SimulatesFinalState"): circuit_execution_ops.get_state_op(backend="junk") - with self.assertRaisesRegex(TypeError, - expected_regex="Cirq.SimulatesFinalState"): - mock_engine = mock.Mock() - circuit_execution_ops.get_state_op( - backend=cirq_google.QuantumEngineSampler( - engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) - with self.assertRaisesRegex(TypeError, expected_regex="must be type bool."): circuit_execution_ops.get_state_op(quantum_concurrent='junk') diff --git a/tensorflow_quantum/core/ops/cirq_ops.py b/tensorflow_quantum/core/ops/cirq_ops.py index 2650e812b..472f9173b 100644 --- a/tensorflow_quantum/core/ops/cirq_ops.py +++ b/tensorflow_quantum/core/ops/cirq_ops.py @@ -19,7 +19,6 @@ import numpy as np import tensorflow as tf import cirq -import cirq_google from tensorflow_quantum.core.ops import batch_util from tensorflow_quantum.core.proto import pauli_sum_pb2 @@ -472,7 +471,6 @@ def _no_grad(grad): if not 
isinstance(num_samples.dtype.as_numpy_dtype(), numbers.Integral): raise TypeError("num_samples tensor must be of integer type") - serialized_programs = programs programs, resolvers = _batch_deserialize_helper(programs, symbol_names, symbol_values) @@ -491,49 +489,12 @@ def _no_grad(grad): ] max_n_qubits = max(len(p.all_qubits()) for p in programs) - if isinstance(sampler, cirq_google.QuantumEngineSampler): - # group samples from identical circuits to reduce communication - # overhead. Have to keep track of the order in which things came - # in to make sure the output is ordered correctly - to_be_grouped = [ - (ser_prog.numpy(), resolver, index) - for index, ( - ser_prog, - resolver) in enumerate(zip(serialized_programs, resolvers)) - ] - - grouped = _group_tuples(to_be_grouped) - - # start all the necessary jobs - results_mapping = {} - for key, value in grouped.items(): - program = programs[value[0][1]] - resolvers = [x[0] for x in value] - orders = [x[1] for x in value] - - # sampler.run_sweep blocks until results are in, so go around it - result = sampler._engine.run_sweep( - program=program, - params=resolvers, - repetitions=num_samples, - processor_ids=sampler._processor_ids, - gate_set=sampler._gate_set) - results_mapping[result] = orders - - # get all results - cirq_results = [None] * len(programs) - for key, value in results_mapping.items(): - this_results = key.results() - for result, index in zip(this_results, value): - cirq_results[index] = result - - else: - # All other cirq.Samplers handled here. - cirq_results = [] - for results in sampler.run_batch(programs, - params_list=resolvers, - repetitions=num_samples): - cirq_results.extend(results) + # All other cirq.Samplers handled here. + cirq_results = [] + for results in sampler.run_batch(programs, + params_list=resolvers, + repetitions=num_samples): + cirq_results.extend(results) results = [] for r in cirq_results: diff --git a/tensorflow_quantum/core/ops/cirq_ops_test.py b/tensorflow_quantum/core/ops/cirq_ops_test.py index 1b54771a9..0d87855d2 100644 --- a/tensorflow_quantum/core/ops/cirq_ops_test.py +++ b/tensorflow_quantum/core/ops/cirq_ops_test.py @@ -16,16 +16,15 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position -from unittest import mock import numpy as np import tensorflow as tf from absl.testing import parameterized import cirq -import cirq_google from tensorflow_quantum.core.ops import cirq_ops from tensorflow_quantum.core.serialize import serializer @@ -348,11 +347,6 @@ def test_get_cirq_sampling_op(self): cirq_ops._get_cirq_samples() cirq_ops._get_cirq_samples(cirq.Simulator()) cirq_ops._get_cirq_samples(cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() - cirq_ops._get_cirq_samples( - cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) def test_cirq_sampling_op_inputs(self): """test input checking in the cirq sampling op.""" @@ -451,7 +445,7 @@ class DummySampler(cirq.Sampler): def run_sweep(self, program, params, repetitions): """Returns all ones in the correct sample shape.""" return [ - cirq.Result( + cirq.ResultDict( params=param, measurements={ 'tfq': diff --git a/tensorflow_quantum/core/ops/math_ops/BUILD b/tensorflow_quantum/core/ops/math_ops/BUILD index 6eb8a0320..3c390f8e3 100644 --- a/tensorflow_quantum/core/ops/math_ops/BUILD +++ b/tensorflow_quantum/core/ops/math_ops/BUILD @@ -7,10 +7,10 @@ licenses(["notice"]) # Export for the PIP package. exports_files(["__init__.py"]) -config_setting( - name = "windows", - constraint_values = ["@bazel_tools//platforms:windows"], -) +# config_setting( +# name = "windows", +# constraint_values = ["@bazel_tools//platforms:windows"], +# ) cc_binary( name = "_tfq_math_ops.so", @@ -21,45 +21,45 @@ cc_binary( "tfq_simulate_1d_samples.cc", "tfq_simulate_1d_sampled_expectation.cc", ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ # cirq 
cc proto @@ -68,10 +68,12 @@ cc_binary( "//tensorflow_quantum/core/src:adj_util", "//tensorflow_quantum/core/src:circuit_parser_qsim", "//tensorflow_quantum/core/src:util_qsim", + "@com_google_absl//absl/functional:any_invocable", "@qsim//lib:mps_simulator", "@qsim//lib:mps_statespace", "@qsim//lib:qsim_lib", - "@eigen//:eigen3", + + # "@eigen//:eigen3", # tensorflow core framework # tensorflow core lib # tensorflow core protos diff --git a/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py b/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py index 1ab8031ea..9d45d013a 100644 --- a/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py +++ b/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py b/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py index 7fa74ee40..898c7461b 100644 --- a/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py +++ b/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py b/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py index ae83857b6..7322506b7 100644 --- a/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py +++ b/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py b/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py index 114706d64..d80629861 100644 --- a/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py +++ b/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index 74751f9cc..6a5c9db49 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -54,6 +54,11 @@ class TfqInnerProductOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 4 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "other_programs must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. 
const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_internal_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc index 3db493b11..198f92c63 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc @@ -55,6 +55,11 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "other_programs must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_internal_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc index c00b43a9b..aacf82cb6 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc @@ -63,6 +63,11 @@ class TfqSimulateMPS1DExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 4 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc index ba94e8c72..e7014eb46 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc @@ -65,6 +65,11 @@ class TfqSimulateMPS1DSampledExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/noise/BUILD b/tensorflow_quantum/core/ops/noise/BUILD index 3758037e5..8548b791f 100644 --- a/tensorflow_quantum/core/ops/noise/BUILD +++ b/tensorflow_quantum/core/ops/noise/BUILD @@ -7,10 +7,6 @@ licenses(["notice"]) # Export for the PIP package. 
exports_files(["__init__.py"]) -config_setting( - name = "windows", - constraint_values = ["@bazel_tools//platforms:windows"], -) cc_binary( name = "_tfq_noise_ops.so", @@ -19,45 +15,45 @@ cc_binary( "tfq_noisy_sampled_expectation.cc", "tfq_noisy_samples.cc" ], - copts = select({ - ":windows": [ - "/D__CLANG_SUPPORT_DYN_ANNOTATION__", - "/D_USE_MATH_DEFINES", - "/DEIGEN_MPL2_ONLY", - "/DEIGEN_MAX_ALIGN_BYTES=64", - "/DEIGEN_HAS_TYPE_TRAITS=0", - "/DTF_USE_SNAPPY", - "/showIncludes", - "/MD", - "/O2", - "/DNDEBUG", - "/w", - "-DWIN32_LEAN_AND_MEAN", - "-DNOGDI", - "/d2ReducedOptimizeHugeFunctions", - "/arch:AVX", - "/std:c++17", - "-DTENSORFLOW_MONOLITHIC_BUILD", - "/DPLATFORM_WINDOWS", - "/DEIGEN_HAS_C99_MATH", - "/DTENSORFLOW_USE_EIGEN_THREADPOOL", - "/DEIGEN_AVOID_STL_ARRAY", - "/Iexternal/gemmlowp", - "/wd4018", - "/wd4577", - "/DNOGDI", - "/UTF_COMPILE_LIBRARY", - ], - "//conditions:default": [ - "-pthread", - "-std=c++17", - "-D_GLIBCXX_USE_CXX11_ABI=1", - ], - }), - features = select({ - ":windows": ["windows_export_all_symbols"], - "//conditions:default": [], - }), + # copts = select({ + # ":windows": [ + # "/D__CLANG_SUPPORT_DYN_ANNOTATION__", + # "/D_USE_MATH_DEFINES", + # "/DEIGEN_MPL2_ONLY", + # "/DEIGEN_MAX_ALIGN_BYTES=64", + # "/DEIGEN_HAS_TYPE_TRAITS=0", + # "/DTF_USE_SNAPPY", + # "/showIncludes", + # "/MD", + # "/O2", + # "/DNDEBUG", + # "/w", + # "-DWIN32_LEAN_AND_MEAN", + # "-DNOGDI", + # "/d2ReducedOptimizeHugeFunctions", + # "/arch:AVX", + # "/std:c++17", + # "-DTENSORFLOW_MONOLITHIC_BUILD", + # "/DPLATFORM_WINDOWS", + # "/DEIGEN_HAS_C99_MATH", + # "/DTENSORFLOW_USE_EIGEN_THREADPOOL", + # "/DEIGEN_AVOID_STL_ARRAY", + # "/Iexternal/gemmlowp", + # "/wd4018", + # "/wd4577", + # "/DNOGDI", + # "/UTF_COMPILE_LIBRARY", + # ], + # "//conditions:default": [ + # "-pthread", + # "-std=c++17", + # "-D_GLIBCXX_USE_CXX11_ABI=1", + # ], + # }), + # features = select({ + # ":windows": ["windows_export_all_symbols"], + # "//conditions:default": [], + # }), linkshared = 1, deps = [ # cirq cc proto diff --git a/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py index 1e73500b8..953829318 100644 --- a/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py +++ b/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py index 35d1cc113..c76122070 100644 --- a/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py +++ b/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py index b952e8d40..7790bda8b 100644 --- a/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py +++ b/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc index c67fa01f7..5cbf0b50d 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc @@ -65,6 +65,11 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc index aa0c85691..89263b56a 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc @@ -66,6 +66,11 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc index e7252baee..c96a9cb0d 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc @@ -55,6 +55,16 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(2).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "symbol_values must be rank 2. Got ", context->input(3).dims()))); + + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_param_size = context->input(2).dim_size(1); diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py b/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py index 388bb163f..bba67db0a 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
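[Editor's sketch] The OP_REQUIRES rank checks added in these kernels all assume the usual TFQ input layout: `programs` and `symbol_names` are rank-1 string tensors, while `symbol_values` and `pauli_sums` are rank-2 ([batch, n_symbols] and [batch, n_ops]). A minimal sketch of well-formed inputs, assuming a standard tensorflow-quantum install and using the public `tfq.get_expectation_op()` wrapper around these kernels (the qubit, symbol name, and values are illustrative, not taken from this patch):

    import cirq
    import sympy
    import tensorflow as tf
    import tensorflow_quantum as tfq

    q = cirq.GridQubit(0, 0)
    alpha = sympy.Symbol('alpha')

    # Rank-1 string tensors: one serialized circuit, one symbol name.
    programs = tfq.convert_to_tensor([cirq.Circuit(cirq.X(q)**alpha)])
    symbol_names = tf.constant(['alpha'])

    # Rank-2 tensors: [batch, n_symbols] values, [batch, n_ops] operators.
    symbol_values = tf.constant([[0.5]])
    pauli_sums = tfq.convert_to_tensor([[cirq.Z(q)]])

    # A rank-1 operator tensor, e.g. tfq.convert_to_tensor([cirq.Z(q)]),
    # does not satisfy the rank-2 requirement these kernels now enforce.
    expectation = tfq.get_expectation_op()(programs, symbol_names,
                                           symbol_values, pauli_sums)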
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py b/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py index 14bccd9bf..96059b1d4 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py +++ b/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc index bca6d2f63..7583437ca 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc @@ -54,6 +54,11 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 4 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py index 4cdbe42e5..93c1770e2 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py +++ b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc index e0ed05a49..b9f9ee982 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc @@ -58,6 +58,11 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { tensorflow::errors::InvalidArgument(absl::StrCat( "Expected 5 inputs, got ", num_inputs, " inputs."))); + OP_REQUIRES( + context, context->input(3).dims() == 2, + tensorflow::errors::InvalidArgument(absl::StrCat( + "pauli_sums must be rank 2. Got ", context->input(3).dims()))); + // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); const int output_dim_op_size = context->input(3).dim_size(1); diff --git a/tensorflow_quantum/core/ops/tfq_unitary_op_test.py b/tensorflow_quantum/core/ops/tfq_unitary_op_test.py index 212094056..5bba1df59 100644 --- a/tensorflow_quantum/core/ops/tfq_unitary_op_test.py +++ b/tensorflow_quantum/core/ops/tfq_unitary_op_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py b/tensorflow_quantum/core/ops/tfq_utility_ops_test.py index 00c5ff791..8faf14aaf 100644 --- a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py +++ b/tensorflow_quantum/core/ops/tfq_utility_ops_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/serialize/op_deserializer.py b/tensorflow_quantum/core/serialize/op_deserializer.py index 667ee0ef0..bda2a8f0b 100644 --- a/tensorflow_quantum/core/serialize/op_deserializer.py +++ b/tensorflow_quantum/core/serialize/op_deserializer.py @@ -57,10 +57,10 @@ def qubit_from_proto(proto_id): def _arg_from_proto( - arg_proto, - *, - arg_function_language, - required_arg_name=None, + arg_proto, + *, + arg_function_language, + required_arg_name=None, ): """Extracts a python value from an argument value proto. Args: diff --git a/tensorflow_quantum/core/serialize/op_deserializer_test.py b/tensorflow_quantum/core/serialize/op_deserializer_test.py index ce1748b65..50c6decbe 100644 --- a/tensorflow_quantum/core/serialize/op_deserializer_test.py +++ b/tensorflow_quantum/core/serialize/op_deserializer_test.py @@ -15,6 +15,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position @@ -38,13 +39,16 @@ def op_proto(json_dict): @cirq.value_equality -class GateWithAttribute(cirq.SingleQubitGate): +class GateWithAttribute(cirq.Gate): """GateAttribute helper class.""" def __init__(self, val, not_req=None): self.val = val self.not_req = not_req + def num_qubits(self): + return 1 + def _value_equality_values_(self): return (self.val,) diff --git a/tensorflow_quantum/core/serialize/op_serializer.py b/tensorflow_quantum/core/serialize/op_serializer.py index 509216def..bdb1effc5 100644 --- a/tensorflow_quantum/core/serialize/op_serializer.py +++ b/tensorflow_quantum/core/serialize/op_serializer.py @@ -176,11 +176,11 @@ def can_serialize_operation(self, op): return supported_gate_type and self.can_serialize_predicate(op) def to_proto( - self, - op, - msg=None, - *, - arg_function_language='', + self, + op, + msg=None, + *, + arg_function_language='', ): """Returns the cirq_google.api.v2.Operation message as a proto dict.""" diff --git a/tensorflow_quantum/core/serialize/op_serializer_test.py b/tensorflow_quantum/core/serialize/op_serializer_test.py index a485091e7..921d1e020 100644 --- a/tensorflow_quantum/core/serialize/op_serializer_test.py +++ b/tensorflow_quantum/core/serialize/op_serializer_test.py @@ -15,6 +15,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position @@ -38,32 +39,41 @@ def op_proto(json): return op -class GateWithAttribute(cirq.SingleQubitGate): +class GateWithAttribute(cirq.Gate): """GateAttribute helper class.""" def __init__(self, val): self.val = val + def num_qubits(self): + return 1 + -class GateWithProperty(cirq.SingleQubitGate): +class GateWithProperty(cirq.Gate): """GateProperty helper class.""" def __init__(self, val, not_req=None): self._val = val self._not_req = not_req + def num_qubits(self): + return 1 + @property def val(self): """get val.""" return self._val -class GateWithMethod(cirq.SingleQubitGate): +class GateWithMethod(cirq.Gate): """GateMethod helper class.""" def __init__(self, val): self._val = val + def num_qubits(self): + return 1 + def get_val(self): """get val.""" return self._val diff --git a/tensorflow_quantum/core/serialize/serializable_gate_set.py b/tensorflow_quantum/core/serialize/serializable_gate_set.py index 977f05dfb..bdf1e97b1 100644 --- a/tensorflow_quantum/core/serialize/serializable_gate_set.py +++ b/tensorflow_quantum/core/serialize/serializable_gate_set.py @@ -140,11 +140,11 @@ def serialize(self, program, msg=None, *, arg_function_language=None): return msg def serialize_op( - self, - op, - msg=None, - *, - arg_function_language='', + self, + op, + msg=None, + *, + arg_function_language='', ): """Serialize an Operation to cirq_google.api.v2.Operation proto. @@ -195,10 +195,10 @@ def deserialize(self, proto, device=None): raise NotImplementedError('Program proto does not contain a circuit.') def deserialize_op( - self, - operation_proto, - *, - arg_function_language='', + self, + operation_proto, + *, + arg_function_language='', ): """Deserialize an Operation from a cirq_google.api.v2.Operation. @@ -231,10 +231,10 @@ def _serialize_circuit(self, circuit, msg, *, arg_function_language): arg_function_language=arg_function_language) def _deserialize_circuit( - self, - circuit_proto, - *, - arg_function_language, + self, + circuit_proto, + *, + arg_function_language, ): moments = [] for i, moment_proto in enumerate(circuit_proto.moments): diff --git a/tensorflow_quantum/core/serialize/serializable_gate_set_test.py b/tensorflow_quantum/core/serialize/serializable_gate_set_test.py index 1e6fd9861..e8f94b1db 100644 --- a/tensorflow_quantum/core/serialize/serializable_gate_set_test.py +++ b/tensorflow_quantum/core/serialize/serializable_gate_set_test.py @@ -15,6 +15,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/core/serialize/serializer_test.py b/tensorflow_quantum/core/serialize/serializer_test.py index a43da0cf0..3a89a03ea 100644 --- a/tensorflow_quantum/core/serialize/serializer_test.py +++ b/tensorflow_quantum/core/serialize/serializer_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
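[Editor's sketch] The `GateWithAttribute`, `GateWithProperty`, and `GateWithMethod` helpers above move off `cirq.SingleQubitGate`, which newer Cirq releases no longer provide, and instead declare the qubit count directly on a plain `cirq.Gate` subclass. A minimal sketch of the same pattern, assuming a recent Cirq release (the gate name and unitary are illustrative):

    import cirq
    import numpy as np

    class MySingleQubitGate(cirq.Gate):
        """Custom single-qubit gate that declares its own qubit count."""

        def num_qubits(self):
            return 1

        def _unitary_(self):
            # Any 2x2 unitary works; Pauli-X is used here for illustration.
            return np.array([[0.0, 1.0], [1.0, 0.0]])

    q = cirq.LineQubit(0)
    circuit = cirq.Circuit(MySingleQubitGate().on(q))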
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position @@ -93,16 +94,26 @@ def _build_op_proto(gate_id, arg_names, arg_vals, qubit_ids): circuit_proto = program_proto.circuit circuit_proto.scheduling_strategy = circuit_proto.MOMENT_BY_MOMENT - circuit_proto.moments.add(operations=[program_pb2.Operation( - gate = program_pb2.Gate(id=gate_id), - args = {arg_names[i]: (program_pb2.Arg(symbol=arg_vals[i]) \ - if isinstance(arg_vals[i], str) else \ - program_pb2.Arg( - arg_value=program_pb2.ArgValue( - float_value=np.round(float(arg_vals[i]), 6)))) \ - for i in range(len(arg_vals))}, - qubits=[program_pb2.Qubit( - id=q_id) for q_id in qubit_ids])]) + + qubit_protos = [program_pb2.Qubit(id=q_id) for q_id in qubit_ids] + + def _create_arg(value): + """Creates a program_pb2.Arg based on the value type.""" + if isinstance(value, str): + return program_pb2.Arg(symbol=value) + return program_pb2.Arg(arg_value=program_pb2.ArgValue( + float_value=round(float(value), 6))) + + qubit_protos = [program_pb2.Qubit(id=q_id) for q_id in qubit_ids] + all_operations = [ + program_pb2.Operation(gate=program_pb2.Gate(id=gate_id), + args={ + name: _create_arg(value) + for name, value in zip(arg_names, arg_vals) + }, + qubits=qubit_protos) + ] + circuit_proto.moments.add(operations=all_operations) # Add in empty control information t = program_proto.circuit.moments[0].operations[0] diff --git a/tensorflow_quantum/core/src/BUILD b/tensorflow_quantum/core/src/BUILD index 5595a5ca2..9b9419b5b 100644 --- a/tensorflow_quantum/core/src/BUILD +++ b/tensorflow_quantum/core/src/BUILD @@ -56,6 +56,7 @@ cc_library( "//tensorflow_quantum/core/proto:projector_sum_cc_proto", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", "@qsim//lib:channel", @@ -99,6 +100,8 @@ cc_library( "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", "//tensorflow_quantum/core/proto:projector_sum_cc_proto", "@com_google_absl//absl/container:inlined_vector", # unclear why needed. + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", "@qsim//lib:qsim_lib", @@ -114,6 +117,9 @@ cc_test( ":util_qsim", "@com_google_absl//absl/container:flat_hash_map", "@com_google_googletest//:gtest_main", + "@com_google_absl//absl/functional:any_invocable", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/status", "@local_config_tf//:libtensorflow_framework", "@local_config_tf//:tf_header_lib", "@qsim//lib:qsim_lib", diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim.cc b/tensorflow_quantum/core/src/circuit_parser_qsim.cc index 1024d28c7..2b3e81d19 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim.cc @@ -58,8 +58,7 @@ inline Status ParseProtoArg( // iterator> const auto arg_v = op.args().find(arg_name); if (arg_v == op.args().end()) { - return Status(static_cast( - absl::StatusCode::kInvalidArgument), + return Status(absl::StatusCode::kInvalidArgument, "Could not find arg: " + arg_name + " in op."); } // find proto arg field. 
@@ -71,8 +70,7 @@ inline Status ParseProtoArg( const auto iter = param_map.find(proto_arg.symbol()); if (iter == param_map.end()) { return Status( - static_cast( - absl::StatusCode::kInvalidArgument), + absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: " + proto_arg.symbol()); } *result = iter->second.second; @@ -103,8 +101,7 @@ inline Status ParseProtoControls(const Operation& op, absl::StrSplit(control_v_str, ','); if (control_toks.size() != control_v_toks.size()) { - return Status(static_cast( - absl::StatusCode::kInvalidArgument), + return Status(absl::StatusCode::kInvalidArgument, "Mistmatched number of control qubits and control values."); } if (control_toks.empty()) { @@ -123,8 +120,7 @@ inline Status ParseProtoControls(const Operation& op, for (auto tok : control_v_toks) { valid = absl::SimpleAtoi(tok, &tmp); if (!valid) { - return Status(static_cast( - absl::StatusCode::kInvalidArgument), + return Status(absl::StatusCode::kInvalidArgument, "Unparseable control value: " + std::string(tok)); } control_values->push_back(tmp); @@ -595,8 +591,7 @@ tensorflow::Status ParseAppendGate(const Operation& op, auto build_f = func_map.find(op.gate().id()); if (build_f == func_map.end()) { *lookup_succeeded = false; - return Status(static_cast( - absl::StatusCode::kInvalidArgument), + return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Could not parse gate id: ", op.gate().id(), ". This is likely because a cirq.Channel was " "used in an op that does not support them.")); @@ -780,8 +775,7 @@ tensorflow::Status ParseAppendChannel(const Operation& op, auto build_f = chan_func_map.find(op.gate().id()); if (build_f == chan_func_map.end()) { - return Status(static_cast( - absl::StatusCode::kInvalidArgument), + return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Could not parse channel id: ", op.gate().id())); } return build_f->second(op, num_qubits, time, ncircuit); diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc index e6ea68e80..e4d487a85 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc @@ -191,8 +191,7 @@ TEST_P(TwoQubitEigenFixture, TwoEigenGate) { // Test case where proto arg missing. ASSERT_EQ(QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find arg: exponent in op.")); test_circuit.gates.clear(); @@ -204,8 +203,7 @@ TEST_P(TwoQubitEigenFixture, TwoEigenGate) { ASSERT_EQ( QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: alpha")); } @@ -363,8 +361,7 @@ TEST_P(SingleQubitEigenFixture, SingleEigenGate) { // Test case where proto arg missing. 
ASSERT_EQ(QsimCircuitFromProgram(program_proto, symbol_map, 1, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find arg: exponent in op.")); test_circuit.gates.clear(); @@ -376,8 +373,7 @@ TEST_P(SingleQubitEigenFixture, SingleEigenGate) { ASSERT_EQ( QsimCircuitFromProgram(program_proto, symbol_map, 1, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: alpha")); } @@ -695,8 +691,7 @@ TEST(QsimCircuitParserTest, FsimGate) { // Test case where proto arg missing. ASSERT_EQ(QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find arg: theta in op.")); test_circuit.gates.clear(); @@ -708,8 +703,7 @@ TEST(QsimCircuitParserTest, FsimGate) { ASSERT_EQ( QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: alpha")); } @@ -859,8 +853,7 @@ TEST(QsimCircuitParserTest, PhasedISwap) { // Test case where proto arg missing. ASSERT_EQ(QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find arg: phase_exponent in op.")); test_circuit.gates.clear(); @@ -872,8 +865,7 @@ TEST(QsimCircuitParserTest, PhasedISwap) { ASSERT_EQ( QsimCircuitFromProgram(program_proto, symbol_map, 2, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: alpha")); } @@ -1027,8 +1019,7 @@ TEST(QsimCircuitParserTest, PhasedXPow) { // Test case where proto arg missing. 
ASSERT_EQ(QsimCircuitFromProgram(program_proto, symbol_map, 1, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find arg: phase_exponent in op.")); test_circuit.gates.clear(); @@ -1040,8 +1031,7 @@ TEST(QsimCircuitParserTest, PhasedXPow) { ASSERT_EQ( QsimCircuitFromProgram(program_proto, symbol_map, 1, &test_circuit, &fused_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not find symbol in parameter map: alpha")); } @@ -1134,8 +1124,7 @@ TEST(QsimCircuitParserTest, InvalidControlValues) { ASSERT_EQ(QsimCircuitFromProgram(program_proto, empty_map, 3, &test_circuit, &fused_circuit, &metadata), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Unparseable control value: junk")); } @@ -1168,8 +1157,7 @@ TEST(QsimCircuitParserTest, MismatchControlNum) { ASSERT_EQ(QsimCircuitFromProgram(program_proto, empty_map, 3, &test_circuit, &fused_circuit, &metadata), tensorflow::Status( - static_cast( - absl::StatusCode::kInvalidArgument), + absl::StatusCode::kInvalidArgument, "Mistmatched number of control qubits and control values.")); } @@ -1571,8 +1559,7 @@ TEST(QsimCircuitParserTest, NoisyBadProto) { NoisyQsimCircuit test_circuit; ASSERT_EQ( NoisyQsimCircuitFromProgram(program_proto, {}, 1, false, &test_circuit), - tensorflow::Status(static_cast( - absl::StatusCode::kInvalidArgument), + tensorflow::Status(absl::StatusCode::kInvalidArgument, "Could not parse channel id: ABCDEFG")); } diff --git a/tensorflow_quantum/datasets/cluster_state_test.py b/tensorflow_quantum/datasets/cluster_state_test.py index 49e75309d..b70da3460 100644 --- a/tensorflow_quantum/datasets/cluster_state_test.py +++ b/tensorflow_quantum/datasets/cluster_state_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/datasets/spin_system_test.py b/tensorflow_quantum/datasets/spin_system_test.py index 3dac53a80..cfb20ee4c 100644 --- a/tensorflow_quantum/datasets/spin_system_test.py +++ b/tensorflow_quantum/datasets/spin_system_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/adjoint_test.py b/tensorflow_quantum/python/differentiators/adjoint_test.py index ffbf9173e..6eb0804ff 100644 --- a/tensorflow_quantum/python/differentiators/adjoint_test.py +++ b/tensorflow_quantum/python/differentiators/adjoint_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/differentiator_test.py b/tensorflow_quantum/python/differentiators/differentiator_test.py index 6f21f2e18..d971b4f4f 100644 --- a/tensorflow_quantum/python/differentiators/differentiator_test.py +++ b/tensorflow_quantum/python/differentiators/differentiator_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/gradient_test.py b/tensorflow_quantum/python/differentiators/gradient_test.py index f666ad801..8aef45190 100644 --- a/tensorflow_quantum/python/differentiators/gradient_test.py +++ b/tensorflow_quantum/python/differentiators/gradient_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/linear_combination_test.py b/tensorflow_quantum/python/differentiators/linear_combination_test.py index f46b086e4..54c24bf59 100644 --- a/tensorflow_quantum/python/differentiators/linear_combination_test.py +++ b/tensorflow_quantum/python/differentiators/linear_combination_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_test.py b/tensorflow_quantum/python/differentiators/parameter_shift_test.py index 5a0846f25..f7c248566 100644 --- a/tensorflow_quantum/python/differentiators/parameter_shift_test.py +++ b/tensorflow_quantum/python/differentiators/parameter_shift_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_util.py b/tensorflow_quantum/python/differentiators/parameter_shift_util.py index 266b3b68e..c079df302 100644 --- a/tensorflow_quantum/python/differentiators/parameter_shift_util.py +++ b/tensorflow_quantum/python/differentiators/parameter_shift_util.py @@ -22,7 +22,10 @@ @tf.function -def parse_programs(programs, symbol_names, symbol_values, n_symbols, +def parse_programs(programs, + symbol_names, + symbol_values, + n_symbols, n_shifts=2): """Helper function to get parameter-shifted programs after parsing programs. 
@@ -85,6 +88,7 @@ def parse_programs(programs, symbol_names, symbol_values, n_symbols, axis=-1) weights_plus = coeff * np.pi * 0.5 * 0.5 * delta_eig + # pylint: disable=no-value-for-parameter,unexpected-keyword-arg weights = tf.concat([weights_plus, -weights_plus], axis=-1) shifts_plus = tf.math.divide_no_nan(tf.math.divide(1.0, delta_eig), coeff) @@ -94,5 +98,5 @@ def parse_programs(programs, symbol_names, symbol_values, n_symbols, axis=-1), [1, 1, n_param_gates, n_shifts]) shifts = val + tf.concat([shifts_plus, -shifts_plus], axis=-1) - + # pylint: enable=no-value-for-parameter,unexpected-keyword-arg return new_programs, weights, shifts, n_param_gates diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py b/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py index 8c1083a10..1c6999b74 100644 --- a/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py +++ b/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py b/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py index 38577dbe9..acc6e67af 100644 --- a/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py +++ b/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py index e4489e763..613c1950c 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/input_checks_test.py b/tensorflow_quantum/python/layers/circuit_executors/input_checks_test.py index affbd55d7..f966c8dff 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/input_checks_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/input_checks_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py b/tensorflow_quantum/python/layers/circuit_executors/sample_test.py index b83848132..8b5697dc7 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sample_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py index 4e96a5c49..0828bc66f 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/state_test.py b/tensorflow_quantum/python/layers/circuit_executors/state_test.py index 8addb0d8f..34d735fdd 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/state_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/state_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/circuit_executors/unitary_test.py b/tensorflow_quantum/python/layers/circuit_executors/unitary_test.py index 7330c807f..c189d2d79 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/unitary_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/unitary_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py b/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py index 0d9e76e7e..9d1210aa1 100644 --- a/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py +++ b/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/high_level/noisy_controlled_pqc_test.py b/tensorflow_quantum/python/layers/high_level/noisy_controlled_pqc_test.py index 57ecf6cff..eb12c0696 100644 --- a/tensorflow_quantum/python/layers/high_level/noisy_controlled_pqc_test.py +++ b/tensorflow_quantum/python/layers/high_level/noisy_controlled_pqc_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/high_level/noisy_pqc.py b/tensorflow_quantum/python/layers/high_level/noisy_pqc.py index 05cb535e8..4e841bc21 100644 --- a/tensorflow_quantum/python/layers/high_level/noisy_pqc.py +++ b/tensorflow_quantum/python/layers/high_level/noisy_pqc.py @@ -132,17 +132,17 @@ class NoisyPQC(tf.keras.layers.Layer): """ def __init__( - self, - model_circuit, - operators, - *, - repetitions=None, - sample_based=None, - differentiator=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi), - regularizer=None, - constraint=None, - **kwargs, + self, + model_circuit, + operators, + *, + repetitions=None, + sample_based=None, + differentiator=None, + initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi), + regularizer=None, + constraint=None, + **kwargs, ): """Instantiate this layer. diff --git a/tensorflow_quantum/python/layers/high_level/noisy_pqc_test.py b/tensorflow_quantum/python/layers/high_level/noisy_pqc_test.py index 112838912..b9014e54a 100644 --- a/tensorflow_quantum/python/layers/high_level/noisy_pqc_test.py +++ b/tensorflow_quantum/python/layers/high_level/noisy_pqc_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/layers/high_level/pqc.py b/tensorflow_quantum/python/layers/high_level/pqc.py index e370be294..0f38c35a7 100644 --- a/tensorflow_quantum/python/layers/high_level/pqc.py +++ b/tensorflow_quantum/python/layers/high_level/pqc.py @@ -131,17 +131,17 @@ class PQC(tf.keras.layers.Layer): """ def __init__( - self, - model_circuit, - operators, - *, - repetitions=None, - backend='noiseless', - differentiator=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi), - regularizer=None, - constraint=None, - **kwargs, + self, + model_circuit, + operators, + *, + repetitions=None, + backend='noiseless', + differentiator=None, + initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi), + regularizer=None, + constraint=None, + **kwargs, ): """Instantiate this layer. diff --git a/tensorflow_quantum/python/layers/high_level/pqc_test.py b/tensorflow_quantum/python/layers/high_level/pqc_test.py index e6d831c66..ce31fcda5 100644 --- a/tensorflow_quantum/python/layers/high_level/pqc_test.py +++ b/tensorflow_quantum/python/layers/high_level/pqc_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
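[Editor's sketch] The `NoisyPQC` and `PQC` constructors above are only re-indented; for context, here is a minimal usage sketch of the `PQC` layer with its keyword-only arguments left at their defaults, assuming a standard tensorflow-quantum install (the qubit, symbol, and operator choices are illustrative):

    import cirq
    import sympy
    import tensorflow_quantum as tfq

    q = cirq.GridQubit(0, 0)
    theta = sympy.Symbol('theta')

    # The layer owns and trains the value bound to `theta`.
    model_circuit = cirq.Circuit(cirq.rx(theta).on(q))
    pqc_layer = tfq.layers.PQC(model_circuit, operators=cirq.Z(q))

    # Input circuits are prepended to `model_circuit`; an empty circuit
    # is the simplest valid input.
    inputs = tfq.convert_to_tensor([cirq.Circuit()])
    outputs = pqc_layer(inputs)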
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/optimizers/rotosolve_minimizer.py b/tensorflow_quantum/python/optimizers/rotosolve_minimizer.py index 5bf23f6a0..24007e062 100755 --- a/tensorflow_quantum/python/optimizers/rotosolve_minimizer.py +++ b/tensorflow_quantum/python/optimizers/rotosolve_minimizer.py @@ -259,8 +259,8 @@ def _body(state): next_state_params = post_state.to_dict() next_state_params.update({ "converged": (tf.abs(post_state.objective_value - - post_state.objective_value_prev) < - post_state.tolerance), + post_state.objective_value_prev) + < post_state.tolerance), "num_iterations": post_state.num_iterations + 1, }) return [RotosolveOptimizerResults(**next_state_params)] diff --git a/tensorflow_quantum/python/optimizers/rotosolve_minimizer_test.py b/tensorflow_quantum/python/optimizers/rotosolve_minimizer_test.py index 1d3a2965f..fdeb67846 100755 --- a/tensorflow_quantum/python/optimizers/rotosolve_minimizer_test.py +++ b/tensorflow_quantum/python/optimizers/rotosolve_minimizer_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position @@ -145,7 +146,7 @@ def convert_to_circuit(input_data): a, b = sympy.symbols('a b') # parameters for the circuit circuit = cirq.Circuit( cirq.rx(a).on(q0), - cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1)) + cirq.ry(b).on(q1), cirq.CNOT(q0, q1)) # Build the Keras model. model = tf.keras.Sequential([ diff --git a/tensorflow_quantum/python/optimizers/spsa_minimizer_test.py b/tensorflow_quantum/python/optimizers/spsa_minimizer_test.py index 1de86d86c..e25153dc5 100644 --- a/tensorflow_quantum/python/optimizers/spsa_minimizer_test.py +++ b/tensorflow_quantum/python/optimizers/spsa_minimizer_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position @@ -248,7 +249,7 @@ def convert_to_circuit(input_data): a, b = sympy.symbols('a b') # parameters for the circuit circuit = cirq.Circuit( cirq.rx(a).on(q0), - cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1)) + cirq.ry(b).on(q1), cirq.CNOT(q0, q1)) # Build the Keras model. model = tf.keras.Sequential([ diff --git a/tensorflow_quantum/python/quantum_context_test.py b/tensorflow_quantum/python/quantum_context_test.py index 5b219b1dc..dde3daa77 100644 --- a/tensorflow_quantum/python/quantum_context_test.py +++ b/tensorflow_quantum/python/quantum_context_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. 
# pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/tensorflow_quantum/python/util.py b/tensorflow_quantum/python/util.py index 92ebeabee..006dce8bd 100644 --- a/tensorflow_quantum/python/util.py +++ b/tensorflow_quantum/python/util.py @@ -577,8 +577,9 @@ def gate_approx_eq(gate_true, gate_deser, atol=1e-5): raise TypeError(f"`gate_true` not a cirq gate, got {type(gate_true)}") if not isinstance(gate_deser, cirq.Gate): raise TypeError(f"`gate_deser` not a cirq gate, got {type(gate_deser)}") - if isinstance(gate_true, cirq.ControlledGate) != isinstance( - gate_deser, cirq.ControlledGate): + if isinstance(gate_true, + cirq.ControlledGate) != isinstance(gate_deser, + cirq.ControlledGate): return False if isinstance(gate_true, cirq.ControlledGate): if gate_true.control_qid_shape != gate_deser.control_qid_shape: diff --git a/tensorflow_quantum/python/util_test.py b/tensorflow_quantum/python/util_test.py index 3d4e2dd76..dc3f89874 100644 --- a/tensorflow_quantum/python/util_test.py +++ b/tensorflow_quantum/python/util_test.py @@ -16,6 +16,7 @@ # Remove PYTHONPATH collisions for protobuf. # pylint: disable=wrong-import-position import sys + NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position diff --git a/third_party/tf/qsim.patch b/third_party/tf/qsim.patch new file mode 100644 index 000000000..01f4423a4 --- /dev/null +++ b/third_party/tf/qsim.patch @@ -0,0 +1,75 @@ +diff --git WORKSPACE WORKSPACE +index 3bbcd68..e69de29 100644 +--- WORKSPACE ++++ WORKSPACE +@@ -1,70 +0,0 @@ +-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +- +-http_archive( +- name = "platforms", +- urls = [ +- "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.6/platforms-0.0.6.tar.gz", +- "https://github.com/bazelbuild/platforms/releases/download/0.0.6/platforms-0.0.6.tar.gz", +- ], +- sha256 = "5308fc1d8865406a49427ba24a9ab53087f17f5266a7aabbfc28823f3916e1ca", +-) +- +-http_archive( +- name = "com_google_googletest", +- sha256 = "ab78fa3f912d44d38b785ec011a25f26512aaedc5291f51f3807c592b506d33a", +- strip_prefix = "googletest-58d77fa8070e8cec2dc1ed015d66b454c8d78850", +- url = "https://github.com/google/googletest/archive/58d77fa8070e8cec2dc1ed015d66b454c8d78850.zip", +-) +- +-# Required for testing compatibility with TF Quantum: +-# https://github.com/tensorflow/quantum +-http_archive( +- name = "org_tensorflow", +- sha256 = "e52cda3bae45f0ae0fccd4055e9fa29892b414f70e2df94df9a3a10319c75fff", +- strip_prefix = "tensorflow-2.11.0", +- urls = [ +- "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.11.0.zip", +- ], +-) +- +-load("@org_tensorflow//tensorflow:workspace3.bzl", "workspace") +- +-workspace() +- +-load("@org_tensorflow//tensorflow:workspace2.bzl", "workspace") +- +-workspace() +- +-load("@org_tensorflow//tensorflow:workspace1.bzl", "workspace") +- +-workspace() +- +-load("@org_tensorflow//tensorflow:workspace0.bzl", "workspace") +- +-workspace() +- +- +-EIGEN_COMMIT = "3bb6a48d8c171cf20b5f8e48bfb4e424fbd4f79e" +-EIGEN_SHA256 = "eca9847b3fe6249e0234a342b78f73feec07d29f534e914ba5f920f3e09383a3" +- +- +-http_archive( +- name = "eigen", +- build_file_content = """ +-cc_library( +- name = "eigen3", +- textual_hdrs = glob(["Eigen/**", "unsupported/**"]), +- visibility = ["//visibility:public"], +-) +- """, +- sha256 = EIGEN_SHA256, +- strip_prefix = 
"eigen-{commit}".format(commit = EIGEN_COMMIT), +- urls = [ +- "https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/{commit}/eigen-{commit}.tar.gz".format(commit = EIGEN_COMMIT), +- "https://gitlab.com/libeigen/eigen/-/archive/{commit}/eigen-{commit}.tar.gz".format(commit = EIGEN_COMMIT), +- ], +-) +- +-load("//third_party/cuquantum:cuquantum_configure.bzl", "cuquantum_configure") +- +-cuquantum_configure(name = "local_config_cuquantum") \ No newline at end of file diff --git a/third_party/tf/tf.patch b/third_party/tf/tf.patch new file mode 100644 index 000000000..4ce7dc753 --- /dev/null +++ b/third_party/tf/tf.patch @@ -0,0 +1,74 @@ +diff --git tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +index a2bdd6a7eed..ec25c23d8d4 100644 +--- tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl ++++ tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +@@ -2,7 +2,7 @@ + + load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64.bzl", "remote_aarch64_configure") + load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure") +-load("//third_party/py:python_configure.bzl", "remote_python_configure") ++load("//third_party/py/non_hermetic:python_configure.bzl", "remote_python_configure") + + def ml2014_tf_aarch64_configs(name_container_map, env): + for name, container in name_container_map.items(): +diff --git tensorflow/tools/toolchains/remote_config/rbe_config.bzl tensorflow/tools/toolchains/remote_config/rbe_config.bzl +index 9f71a414bf7..57f70752323 100644 +--- tensorflow/tools/toolchains/remote_config/rbe_config.bzl ++++ tensorflow/tools/toolchains/remote_config/rbe_config.bzl +@@ -1,6 +1,6 @@ + """Macro that creates external repositories for remote config.""" + +-load("//third_party/py:python_configure.bzl", "local_python_configure", "remote_python_configure") ++load("//third_party/py/non_hermetic:python_configure.bzl", "local_python_configure", "remote_python_configure") + load("//third_party/gpus:cuda_configure.bzl", "remote_cuda_configure") + load("//third_party/nccl:nccl_configure.bzl", "remote_nccl_configure") + load("//third_party/gpus:rocm_configure.bzl", "remote_rocm_configure") +diff --git tensorflow/workspace2.bzl tensorflow/workspace2.bzl +index 7e9faa558a4..5b18cb0969a 100644 +--- tensorflow/workspace2.bzl ++++ tensorflow/workspace2.bzl +@@ -8,7 +8,7 @@ load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") + load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") + load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") + load("//third_party/git:git_configure.bzl", "git_configure") +-load("//third_party/py:python_configure.bzl", "python_configure") ++load("//third_party/py/non_hermetic:python_configure.bzl", "python_configure") + load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") + load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl", "aarch64_compiler_configure") + load("//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl", "arm_compiler_configure") +diff --git third_party/py/non_hermetic/python_configure.bzl third_party/py/non_hermetic/python_configure.bzl +index 300cbfb6c71..09d98505dd9 100644 +--- third_party/py/non_hermetic/python_configure.bzl ++++ third_party/py/non_hermetic/python_configure.bzl +@@ -206,7 +206,7 @@ def _create_local_python_repository(repository_ctx): + # 
Resolve all labels before doing any real work. Resolving causes the + # function to be restarted with all previous state being lost. This + # can easily lead to a O(n^2) runtime in the number of labels. +- build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) ++ build_tpl = repository_ctx.path(Label("//third_party/py/non_hermetic:BUILD.tpl")) + + python_bin = get_python_bin(repository_ctx) + _check_python_bin(repository_ctx, python_bin) +diff --git third_party/py/numpy/BUILD third_party/py/numpy/BUILD +index 97c7907fc38..c80cc5287bc 100644 +--- third_party/py/numpy/BUILD ++++ third_party/py/numpy/BUILD +@@ -2,14 +2,15 @@ licenses(["restricted"]) + + package(default_visibility = ["//visibility:public"]) + +-alias( ++py_library( + name = "numpy", +- actual = "@pypi_numpy//:pkg", ++ srcs = ["tf_numpy_dummy.py"], ++ srcs_version = "PY3", + ) + + alias( + name = "headers", +- actual = "@pypi_numpy//:numpy_headers", ++ actual = "@local_config_python//:numpy_headers", + ) + + genrule( \ No newline at end of file From 10c9c9b1b02d65b9abd12ea1bf663e6c167097d8 Mon Sep 17 00:00:00 2001 From: MichaelBroughton Date: Fri, 17 May 2024 10:45:48 -0700 Subject: [PATCH 2/7] Bump version to 0.7.4. (#810) --- docs/tutorials/barren_plateaus.ipynb | 2 +- docs/tutorials/gradients.ipynb | 2 +- docs/tutorials/hello_many_worlds.ipynb | 2 +- docs/tutorials/mnist.ipynb | 2 +- docs/tutorials/noise.ipynb | 2 +- docs/tutorials/qcnn.ipynb | 2 +- docs/tutorials/quantum_data.ipynb | 2 +- docs/tutorials/quantum_reinforcement_learning.ipynb | 2 +- docs/tutorials/research_tools.ipynb | 2 +- release/setup.py | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/tutorials/barren_plateaus.ipynb b/docs/tutorials/barren_plateaus.ipynb index dfbac0fb2..94be3a950 100644 --- a/docs/tutorials/barren_plateaus.ipynb +++ b/docs/tutorials/barren_plateaus.ipynb @@ -120,7 +120,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/gradients.ipynb b/docs/tutorials/gradients.ipynb index fa0525475..5eef20cb9 100644 --- a/docs/tutorials/gradients.ipynb +++ b/docs/tutorials/gradients.ipynb @@ -122,7 +122,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/hello_many_worlds.ipynb b/docs/tutorials/hello_many_worlds.ipynb index d069e3455..49c88893b 100644 --- a/docs/tutorials/hello_many_worlds.ipynb +++ b/docs/tutorials/hello_many_worlds.ipynb @@ -129,7 +129,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/mnist.ipynb b/docs/tutorials/mnist.ipynb index 4cccc4b5b..7efe3abe4 100644 --- a/docs/tutorials/mnist.ipynb +++ b/docs/tutorials/mnist.ipynb @@ -120,7 +120,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/noise.ipynb b/docs/tutorials/noise.ipynb index 0c79ff02e..4e40e72a3 100644 --- a/docs/tutorials/noise.ipynb +++ b/docs/tutorials/noise.ipynb @@ -83,7 +83,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/qcnn.ipynb b/docs/tutorials/qcnn.ipynb index bc5da0a44..7b566a5b0 100644 --- a/docs/tutorials/qcnn.ipynb +++ b/docs/tutorials/qcnn.ipynb @@ -131,7 +131,7 
@@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/quantum_data.ipynb b/docs/tutorials/quantum_data.ipynb index 5e6e10fdb..8877807dc 100644 --- a/docs/tutorials/quantum_data.ipynb +++ b/docs/tutorials/quantum_data.ipynb @@ -111,7 +111,7 @@ } ], "source": [ - "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/quantum_reinforcement_learning.ipynb b/docs/tutorials/quantum_reinforcement_learning.ipynb index ef3d3d0dc..bc7644883 100644 --- a/docs/tutorials/quantum_reinforcement_learning.ipynb +++ b/docs/tutorials/quantum_reinforcement_learning.ipynb @@ -143,7 +143,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow-quantum==0.7.2" + "!pip install tensorflow-quantum==0.7.3" ] }, { diff --git a/docs/tutorials/research_tools.ipynb b/docs/tutorials/research_tools.ipynb index 8efd7a89c..8f091f14c 100644 --- a/docs/tutorials/research_tools.ipynb +++ b/docs/tutorials/research_tools.ipynb @@ -83,7 +83,7 @@ }, "outputs": [], "source": [ - "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.2 tensorboard_plugin_profile==2.4.0" + "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3 tensorboard_plugin_profile==2.15.0" ] }, { diff --git a/release/setup.py b/release/setup.py index d02d30650..fd11b6fd0 100644 --- a/release/setup.py +++ b/release/setup.py @@ -55,7 +55,7 @@ def finalize_options(self): # placed as extra to not have required overwrite existing nightly installs if # they exist. EXTRA_PACKAGES = ['tensorflow == 2.15.0'] -CUR_VERSION = '0.7.3' +CUR_VERSION = '0.7.4' class BinaryDistribution(Distribution): From 605d2829131d02758fecd68c7c8b035627eecc6d Mon Sep 17 00:00:00 2001 From: Michael Hucka Date: Sun, 1 Dec 2024 18:51:06 -0800 Subject: [PATCH 3/7] Minimum changes to make CI workflows work again (#814) * Update ubuntu version used for runner Ubuntu 16.04 is no longer supported by GitHub. Updated the runner to use Ubuntu 20.04. * Disable memory leak tests for now The current failures in the Cirq compatibility CI workflow are limited to the Address Sanitizer (ASAN) tests in `scripts/msan_test.sh`. They started happening only when we updated the version of Linux used by the workflow from Ubuntu 16.04 to 20.04, because GitHub no longer offers the Ubuntu 16 runners. After spending a ridiculous amount of time testing various combinations of TensorFlow, TensorFlow Quantum, and compiler toolchains on a more recent Linux, my conclusion is that the ASAN failures stem from differences in the toolchains used to produce the copy of TensorFlow 2.15.0 we get from PyPI, and the current toolchain used to compile TFQ on GitHub. This conclusion comes from the fact if I build a local copy of TensorFlow, and then build TFQ against that, using Clang for everything, the ASAN failures go away. Given that we can't build TensorFlow as part of this workflow (it takes 2 hours to build using 24-cores on a fast machine), it's not clear what can be done to stop the ASAN failures. I'm temporarily commenting out the leak tests in this workflow so that we can proceed on doing other updates and releasing a new version of TFQ. However, this needs to be revisited at some point. 
--- .github/workflows/ci.yaml | 39 +++++++++++++---------- .github/workflows/cirq_compatibility.yaml | 2 +- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0b9ca153c..763fec9e0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -70,23 +70,28 @@ jobs: - name: Full Library Test run: ./scripts/test_all.sh - leak-tests: - name: Memory Leak tests - runs-on: ubuntu-20.04 - needs: [lint, format] - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.10' - architecture: 'x64' - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Leak Test qsim and src - run: ./scripts/msan_test.sh + # 2024-11-30 [mhucka] temporarily turning off leak-tests because it produces + # false positives on GH that we can't immediately address. TODO: if updating + # TFQ to use Clang and the latest TF does not resolve this, find a way to + # skip the handful of failing tests and renable the rest of the msan tests. + # + # leak-tests: + # name: Memory Leak tests + # runs-on: ubuntu-20.04 + # needs: [lint, format] + # + # steps: + # - uses: actions/checkout@v1 + # - uses: actions/setup-python@v1 + # with: + # python-version: '3.10' + # architecture: 'x64' + # - name: Install Bazel on CI + # run: ./scripts/ci_install.sh + # - name: Configure CI TF + # run: echo "Y\n" | ./configure.sh + # - name: Leak Test qsim and src + # run: ./scripts/msan_test.sh tutorials-test: name: Tutorial tests diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml index f5e5b9629..c7cfa788f 100644 --- a/.github/workflows/cirq_compatibility.yaml +++ b/.github/workflows/cirq_compatibility.yaml @@ -7,7 +7,7 @@ on: jobs: consistency: name: Nightly Compatibility - runs-on: ubuntu-16.04 + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 From 5df3478daeaffd29282832176241c5f090e9c8d7 Mon Sep 17 00:00:00 2001 From: mhucka Date: Tue, 3 Dec 2024 02:48:11 +0000 Subject: [PATCH 4/7] Enhance cirq_compatibility.yml This refactors the entire workflow. Improvements: - Caches pip-downloaded Python packages - Uses a Bazel setup action that caches Bazel installations - Adds more debugging output - Adds a workflow_dispatch target to enable manual invocation - Adds options to manual invocation to control caching --- .github/workflows/cirq_compatibility.yaml | 184 ++++++++++++++++++++-- scripts/test_all.sh | 2 +- 2 files changed, 169 insertions(+), 17 deletions(-) diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml index c7cfa788f..9aa17127b 100644 --- a/.github/workflows/cirq_compatibility.yaml +++ b/.github/workflows/cirq_compatibility.yaml @@ -1,24 +1,176 @@ -name: Cirq Compatibility +# Copyright 2024 The TensorFlow Quantum Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Summary: GitHub CI workflow for testing TFQ against Cirq releases +# +# This workflow is executed every night on a schedule. By default, this +# workflow will save Bazel build artifacts if an error occurs during a run. +# +# For testing, this workflow can be invoked manually from the GitHub page at +# https://github.com/tensorflow/quantum/actions/workflows/cirq_compatibility.yaml +# Clicking the "Run workflow" button there will present a form interface with +# options for overridding some of the parameters for the run. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +name: Cirq compatibility nightly tests + +# Default values. These can be overridden when workflow dispatch is used. +env: + # Python version to test against. + py_version: '3.10' + # Bazel version. Note: this needs to match what is used in TF & TFQ. + bazel_version: 6.5.0 + # Machine architecture. + arch: x64 + # Additional .bazelrc options to use. + bazelrc_additions: | + common --announce_rc + build --verbose_failures on: + # Nightly runs. schedule: - - cron: "0 0 * * *" + - cron: 0 0 * * * + # Manual on-demand invocations. + workflow_dispatch: + inputs: + py_version: + description: Version of Python to use + bazel_version: + description: Version of Bazel Python to use + arch: + description: Computer architecture to use + use_bazel_disk_cache: + description: Use Bazel disk_cache between runs? + type: boolean + default: true + cache_bazel_tests: + description: Allow Bazel to cache test results? + type: boolean + default: true + save_artifacts: + description: Make Bazel build outputs downloadable? + type: boolean + default: true + pull_request: + branches: + - master jobs: - consistency: - name: Nightly Compatibility + test-compatibility: + name: Run TFQ tests runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - name: Check out a copy of the TFQ git repository + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + + - name: Set up Python + id: python + uses: actions/setup-python@v5 + with: + python-version: ${{github.event.inputs.py_version || env.py_version}} + architecture: ${{github.event.inputs.arch || env.arch}} + cache: pip + + - name: Install TensorFlow Quantum dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements.txt + + - name: Install the nightly build version of Cirq + run: | + pip install -U cirq --pre + + - name: Configure Bazel options + run: | + # If we didn't get a cache hit on the installed Python environment, + # something's changed, and we want to make sure to re-run all tests. + if [[ "${{steps.python.outputs.cache-hit}}" == "true" + && "${{github.event.inputs.cache_bazel_tests}}" != "false" ]]; then + echo "cache_bazel_tests=auto" >> "$GITHUB_ENV" + else + echo "cache_bazel_tests=no" >> "$GITHUB_ENV" + fi + # Use the disk cache unless told not to. 
+        if [[ "${{github.event.inputs.use_bazel_disk_cache}}" != "false" ]]; then
+          echo "use_bazel_disk_cache=true" >> "$GITHUB_ENV"
+        else
+          echo "use_bazel_disk_cache=false" >> "$GITHUB_ENV"
+        fi
+
+    - name: Set up Bazel with caching
+      if: env.use_bazel_disk_cache == 'true'
+      uses: bazel-contrib/setup-bazel@0.9.1
+      env:
+        USE_BAZEL_VERSION: ${{github.event.inputs.bazel_version || env.bazel_version}}
+      with:
+        disk-cache: ${{github.workflow}}
+        bazelisk-cache: true
+        external-cache: true
+        repository-cache: true
+        bazelrc: |
+          ${{env.bazelrc_additions}}
+          test --cache_test_results=${{env.cache_bazel_tests}}
+
+    - name: Set up Bazel without caching
+      if: env.use_bazel_disk_cache == 'false'
+      uses: bazel-contrib/setup-bazel@0.9.1
+      env:
+        USE_BAZEL_VERSION: ${{github.event.inputs.bazel_version || env.bazel_version}}
+      with:
+        bazelrc: |
+          ${{env.bazelrc_additions}}
+          test --cache_test_results=${{env.cache_bazel_tests}}
+
+    - name: Configure TFQ
+      run: |
+        set -x -e
+        # Save information to the run log, in case it's needed for debugging.
+        which python
+        python --version
+        python -c 'import site; print(site.getsitepackages())'
+        python -c 'import tensorflow; print(tensorflow.version.VERSION)'
+        python -c 'import cirq; print(cirq.__version__)'
+        # Run the TFQ configuration script.
+        printf "Y\n" | ./configure.sh
+
+    - name: Run TFQ unit tests
+      # TODO: when the msan tests are working again, replace the "touch"
+      # line with ./scripts/msan_test.sh 2>&1 | tee msan-tests-output.log
+      run: |
+        set -x -e
+        ./scripts/test_all.sh 2>&1 | tee unit-tests-output.log
+        touch msan-tests-output.log
+
+    - name: Make Bazel artifacts downloadable (if desired)
+      if: >-
+        github.event.inputs.save_artifacts == 'true'
+        && (failure() || github.event_name == 'workflow_dispatch')
+      uses: actions/upload-artifact@v4
       with:
-        python-version: '3.8'
-        architecture: 'x64'
-    - name: Install Bazel on CI
-      run: ./scripts/ci_install.sh
-    - name: Configure CI TF
-      run: echo "Y\n" | ./configure.sh
-    - name: Install Cirq nightly
-      run: pip install -U cirq --pre
-    - name: Nightly tests
-      run: ./scripts/test_all.sh
+        name: bazel-out
+        retention-days: 7
+        include-hidden-files: true
+        path: |
+          unit-tests-output.log
+          msan-tests-output.log
+          /home/runner/.bazel/execroot/__main__/bazel-out/
+          !/home/runner/.bazel/execroot/__main__/bazel-out/**/*.so
+          !/home/runner/.bazel/execroot/__main__/bazel-out/**/*.o
+          !/home/runner/.bazel/execroot/__main__/bazel-out/**/_objs
+          !/home/runner/.bazel/execroot/__main__/bazel-out/**/_solib_k8
diff --git a/scripts/test_all.sh b/scripts/test_all.sh
index 7a9fc7824..ba9225df2 100755
--- a/scripts/test_all.sh
+++ b/scripts/test_all.sh
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ==============================================================================
 echo "Testing All Bazel py_test and cc_tests.";
-test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors //tensorflow_quantum/...)
+test_outputs=$(bazel test --test_timeout=3000 --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...)
 exit_code=$?
 if [ "$exit_code" == "0" ]; then
   echo "Testing Complete!";

From bced7652aa7c3b4147fd203159b6a00c75b1f560 Mon Sep 17 00:00:00 2001
From: mhucka
Date: Wed, 4 Dec 2024 00:04:53 +0000
Subject: [PATCH 5/7] Restore test_all.sh but move timeout setting to cirq_compat

Changing the settings in test_all.sh should be left to a separate PR. Also,
my setting for the timeout should go into cirq_compatibility.yml.
---
 .github/workflows/cirq_compatibility.yaml | 1 +
 scripts/test_all.sh                       | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml
index 9aa17127b..c4fd72df4 100644
--- a/.github/workflows/cirq_compatibility.yaml
+++ b/.github/workflows/cirq_compatibility.yaml
@@ -38,6 +38,7 @@ env:
   bazelrc_additions: |
     common --announce_rc
     build --verbose_failures
+    test --test_timeout=3000
 
 on:
   # Nightly runs.
diff --git a/scripts/test_all.sh b/scripts/test_all.sh
index ba9225df2..5d5405fac 100755
--- a/scripts/test_all.sh
+++ b/scripts/test_all.sh
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ==============================================================================
 echo "Testing All Bazel py_test and cc_tests.";
-test_outputs=$(bazel test --test_timeout=3000 --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...)
+test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --test_output=errors --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...)
 exit_code=$?
 if [ "$exit_code" == "0" ]; then
   echo "Testing Complete!";

From 0b351f5a62c280259dbcf0483473a16926b1714c Mon Sep 17 00:00:00 2001
From: mhucka
Date: Wed, 4 Dec 2024 00:33:54 +0000
Subject: [PATCH 6/7] Rename a couple of things for consistency

Not all the tests are actually unit tests, so let's not call it that. Also,
don't say "nightly" because if we ever change the schedule, it wouldn't be
accurate anymore.
---
 .github/workflows/cirq_compatibility.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml
index c4fd72df4..134687975 100644
--- a/.github/workflows/cirq_compatibility.yaml
+++ b/.github/workflows/cirq_compatibility.yaml
@@ -24,7 +24,7 @@
 # options for overriding some of the parameters for the run.
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-name: Cirq compatibility nightly tests
+name: Cirq compatibility tests
 
 # Default values. These can be overridden when workflow dispatch is used.
 env:
@@ -150,12 +150,12 @@ jobs:
         # Run the TFQ configuration script.
printf "Y\n" | ./configure.sh - - name: Run TFQ unit tests + - name: Run TFQ tests # TODO: when the msan tests are working again, replace the "touch" # line with ./scripts/msan_test.sh 2>&1 | tee msan-tests-output.log run: | set -x -e - ./scripts/test_all.sh 2>&1 | tee unit-tests-output.log + ./scripts/test_all.sh 2>&1 | tee main-tests-output.log touch msan-tests-output.log - name: Make Bazel artifacts downloadable (if desired) @@ -168,7 +168,7 @@ jobs: retention-days: 7 include-hidden-files: true path: | - unit-tests-output.log + main-tests-output.log msan-tests-output.log /home/runner/.bazel/execroot/__main__/bazel-out/ !/home/runner/.bazel/execroot/__main__/bazel-out/**/*.so From 0df6595e5c055045cedcfc73359157796401a2d1 Mon Sep 17 00:00:00 2001 From: mhucka Date: Wed, 4 Dec 2024 00:38:20 +0000 Subject: [PATCH 7/7] Remove testing elements Pull_request was only temporary so that I could see the workflow run. Once this is in the main branch, I should be able to use the workflow dispatch feature. --- .github/workflows/cirq_compatibility.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml index 134687975..1ad2e4195 100644 --- a/.github/workflows/cirq_compatibility.yaml +++ b/.github/workflows/cirq_compatibility.yaml @@ -65,9 +65,6 @@ on: description: Make Bazel build outputs downloadable? type: boolean default: true - pull_request: - branches: - - master jobs: test-compatibility: @@ -76,9 +73,6 @@ jobs: steps: - name: Check out a copy of the TFQ git repository uses: actions/checkout@v4 - with: - ref: ${{github.event.pull_request.head.ref}} - repository: ${{github.event.pull_request.head.repo.full_name}} - name: Set up Python id: python