
[BUG] default.tensor errors on operators it says it supports #6415

Open · 1 task done
albi3ro opened this issue Oct 18, 2024 · 0 comments
Labels
bug 🐛 Something isn't working

Comments

albi3ro (Contributor) commented Oct 18, 2024

Expected behavior

I would expect a PennyLane device to be able to execute any operator it declares as supported.
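
For context, both gates do appear in the operation set that default.tensor advertises. A quick check, assuming the module-level `_operations` set in `pennylane/devices/default_tensor.py` (an internal name, so it may differ between versions):

from pennylane.devices import default_tensor

# Assumption: `_operations` is the internal set of gate names the device
# declares as supported; the attribute name is an implementation detail.
print("MultiRZ" in default_tensor._operations)      # True
print("GlobalPhase" in default_tensor._operations)  # True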

Actual behavior

With MultiRZ, we get:

import pennylane as qml

@qml.qnode(qml.device('default.tensor'))
def circuit(x):
    qml.MultiRZ(x, wires=0)
    return qml.expval(qml.Z(0))

circuit(0.5)
# ValueError: dimensions in operand 0 for collapsing index 'c' don't match (1 != 2)

With GlobalPhase, we get:

@qml.qnode(qml.device('default.tensor'))
def circuit(x):
    qml.GlobalPhase(x)
    return qml.expval(qml.Z(0))

circuit(0.5)
# IndexError: list index out of range
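
As a user-side workaround (my suggestion, not part of the report itself): a one-wire MultiRZ(x) is the same gate as RZ(x), and a global phase does not change expectation values, so both circuits above can be rewritten with gates the device handles correctly:

import pennylane as qml

@qml.qnode(qml.device('default.tensor'))
def circuit(x):
    qml.RZ(x, wires=0)  # equivalent to the one-wire MultiRZ above
    return qml.expval(qml.Z(0))

circuit(0.5)  # runs without error; GlobalPhase can simply be omitted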

Additional information

Both of these operations are included in the device's set of supported operations and pass validation during preprocessing. One straightforward fix for this bug is simply to remove them from the list of supported operations.
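
Alternatively, the failing cases could be handled in the device itself. A rough sketch, assuming the apply_operation_core singledispatch registry and import path visible in the tracebacks below (illustrative only, not a tested patch):

import pennylane as qml
# Assumed import path, based on the traceback frames below:
from pennylane.devices.default_tensor import apply_operation_core

@apply_operation_core.register
def _(ops: qml.MultiRZ, device):
    """Sketch: route a one-wire MultiRZ to RZ instead of a one-site PauliRot MPO."""
    if len(ops.wires) == 1:
        apply_operation_core(qml.RZ(ops.parameters[0], wires=ops.wires), device)
    else:
        apply_operation_core(
            qml.PauliRot(ops.parameters[0], "Z" * len(ops.wires), ops.wires), device
        )

@apply_operation_core.register
def _(ops: qml.GlobalPhase, device):
    """Sketch: a global phase does not change any expectation value, so skip it."""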

Source code

No response

Tracebacks

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[5], line 6
      3     qml.MultiRZ(x, wires=0)
      4     return qml.expval(qml.Z(0))
----> 6 circuit(0.5)

File ~/Prog/pennylane/pennylane/workflow/qnode.py:987, in QNode.__call__(self, *args, **kwargs)
    985 if qml.capture.enabled():
    986     return qml.capture.qnode_call(self, *args, **kwargs)
--> 987 return self._impl_call(*args, **kwargs)

File ~/Prog/pennylane/pennylane/workflow/qnode.py:977, in QNode._impl_call(self, *args, **kwargs)
    974     self._interface = interface
    976 try:
--> 977     res = self._execution_component(args, kwargs)
    978 finally:
    979     if old_interface == "auto":

File ~/Prog/pennylane/pennylane/workflow/qnode.py:935, in QNode._execution_component(self, args, kwargs)
    932 interface = None if self.interface == "numpy" else self.interface
    934 # pylint: disable=unexpected-keyword-arg
--> 935 res = qml.execute(
    936     (self._tape,),
    937     device=self.device,
    938     gradient_fn=gradient_fn,
    939     interface=interface,
    940     transform_program=full_transform_program,
    941     inner_transform=inner_transform_program,
    942     config=config,
    943     gradient_kwargs=gradient_kwargs,
    944     **execute_kwargs,
    945 )
    946 res = res[0]
    948 # convert result to the interface in case the qfunc has no parameters

File ~/Prog/pennylane/pennylane/workflow/execution.py:528, in execute(tapes, device, gradient_fn, interface, transform_program, inner_transform, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, device_vjp, mcm_config)
    526 # Exiting early if we do not need to deal with an interface boundary
    527 if no_interface_boundary_required:
--> 528     results = inner_execute(tapes)
    529     return post_processing(results)
    531 if (
    532     device_vjp
    533     and getattr(device, "short_name", "") in ("lightning.gpu", "lightning.kokkos")
    534     and interface in jpc_interfaces
    535 ):  # pragma: no cover

File ~/Prog/pennylane/pennylane/workflow/execution.py:207, in _make_inner_execute.<locals>.inner_execute(tapes, **_)
    204 transformed_tapes, transform_post_processing = transform_program(tapes)
    206 if transformed_tapes:
--> 207     results = device.execute(transformed_tapes, execution_config=execution_config)
    208 else:
    209     results = ()

File ~/Prog/pennylane/pennylane/devices/modifiers/simulator_tracking.py:30, in _track_execute.<locals>.execute(self, circuits, execution_config)
     28 @wraps(untracked_execute)
     29 def execute(self, circuits, execution_config=DefaultExecutionConfig):
---> 30     results = untracked_execute(self, circuits, execution_config)
     31     if isinstance(circuits, QuantumScript):
     32         batch = (circuits,)

File ~/Prog/pennylane/pennylane/devices/modifiers/single_tape_support.py:32, in _make_execute.<locals>.execute(self, circuits, execution_config)
     30     is_single_circuit = True
     31     circuits = (circuits,)
---> 32 results = batch_execute(self, circuits, execution_config)
     33 return results[0] if is_single_circuit else results

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:649, in DefaultTensor.execute(self, circuits, execution_config)
    643         raise WireError(
    644             "Mismatch between circuit and device wires. "
    645             f"Circuit has wires {circuit.wires.tolist()}. "
    646             f"Tensor on device has wires {self.wires.tolist()}"
    647         )
    648     circuit = circuit.map_to_standard_wires()
--> 649     results.append(self.simulate(circuit))
    651 return tuple(results)

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:690, in DefaultTensor.simulate(self, circuit)
    687     self._quimb_circuit = self._initial_quimb_circuit(wires)
    689 for op in operations:
--> 690     self._apply_operation(op)
    692 if not circuit.shots:
    693     if len(circuit.measurements) == 1:

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:707, in DefaultTensor._apply_operation(self, op)
    699 def _apply_operation(self, op: qml.operation.Operator) -> None:
    700     """Apply a single operator to the circuit.
    701 
    702     Internally it uses ``quimb``'s ``apply_gate`` method. This method modifies the tensor state of the device.
   (...)
    705         op (Operator): The operation to apply.
    706     """
--> 707     apply_operation_core(op, self)

File /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/functools.py:907, in singledispatch.<locals>.wrapper(*args, **kw)
    903 if not args:
    904     raise TypeError(f'{funcname} requires at least '
    905                     '1 positional argument')
--> 907 return dispatch(args[0].__class__)(*args, **kw)

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:936, in apply_operation_core_multirz(ops, device)
    933 @apply_operation_core.register
    934 def apply_operation_core_multirz(ops: qml.MultiRZ, device):
    935     """Dispatcher for _apply_operation."""
--> 936     apply_operation_core(qml.PauliRot(ops.parameters[0], "Z" * len(ops.wires), ops.wires), device)

File /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/functools.py:907, in singledispatch.<locals>.wrapper(*args, **kw)
    903 if not args:
    904     raise TypeError(f'{funcname} requires at least '
    905                     '1 positional argument')
--> 907 return dispatch(args[0].__class__)(*args, **kw)

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:967, in apply_operation_core_paulirot(ops, device)
    965 mpo = qtn.MatrixProductOperator(arrays=arrays, sites=sites)
    966 mpo = mpo.fill_empty_sites()
--> 967 device._quimb_circuit._psi = mpo.apply(
    968     device._quimb_circuit.psi,
    969     max_bond=device._max_bond_dim,
    970     cutoff=device._cutoff,
    971 )

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:3780, in MatrixProductOperator.apply(self, other, compress, **compress_opts)
   3733 r"""Act with this MPO on another MPO or MPS, such that the resulting
   3734 object has the same tensor network structure/indices as ``other``.
   3735 
   (...)
   3777 MatrixProductOperator or MatrixProductState
   3778 """
   3779 if isinstance(other, MatrixProductState):
-> 3780     return self._apply_mps(other, compress=compress, **compress_opts)
   3781 elif isinstance(other, MatrixProductOperator):
   3782     return self._apply_mpo(other, compress=compress, **compress_opts)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:3713, in MatrixProductOperator._apply_mps(self, other, compress, contract, **compress_opts)
   3710 def _apply_mps(
   3711     self, other, compress=False, contract=True, **compress_opts
   3712 ):
-> 3713     return tensor_network_apply_op_vec(
   3714         A=self,
   3715         x=other,
   3716         compress=compress,
   3717         contract=contract,
   3718         **compress_opts,
   3719     )

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_arbgeom.py:190, in tensor_network_apply_op_vec(A, x, which_A, contract, fuse_multibonds, compress, inplace, inplace_A, **compress_opts)
    187 if contract:
    188     # optionally contract all tensor at each site
    189     for site in sites_present_in_A:
--> 190         x ^= site
    192     if fuse_multibonds:
    193         x.fuse_multibonds_()

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_core.py:8513, in TensorNetwork.__ixor__(self, tags)
   8511 def __ixor__(self, tags):
   8512     """Overload of '^=' for inplace TensorNetwork.contract."""
-> 8513     return self.contract(tags, inplace=True)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_core.py:8371, in TensorNetwork.contract(self, tags, output_inds, optimize, get, backend, preserve_tensor, max_bond, inplace, **opts)
   8368     return tensor_contract(*self.tensor_map.values(), **opts)
   8370 # contract some or all tensors, but keeping tensor network
-> 8371 return self.contract_tags(tags, inplace=inplace, **opts)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_core.py:8237, in TensorNetwork.contract_tags(self, tags, which, output_inds, optimize, get, backend, preserve_tensor, inplace, **contract_opts)
   8232 # whether we should let tensor_contract return a raw scalar
   8233 preserve_tensor = (
   8234     preserve_tensor or inplace or (tn.num_tensors >= 1)
   8235 )
-> 8237 t = tensor_contract(
   8238     *tagged_ts,
   8239     output_inds=output_inds,
   8240     optimize=optimize,
   8241     get=get,
   8242     backend=backend,
   8243     preserve_tensor=preserve_tensor,
   8244     **contract_opts,
   8245 )
   8247 if (tn.num_tensors == 0) and (not inplace):
   8248     # contracted all down to single tensor or scalar -> return it
   8249     # (apart from if inplace -> we want to keep the tensor network)
   8250     return t

File /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/functools.py:907, in singledispatch.<locals>.wrapper(*args, **kw)
    903 if not args:
    904     raise TypeError(f'{funcname} requires at least '
    905                     '1 positional argument')
--> 907 return dispatch(args[0].__class__)(*args, **kw)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_core.py:277, in tensor_contract(output_inds, optimize, get, backend, preserve_tensor, drop_tags, *tensors, **contract_opts)
    266     return _tensor_contract_get_other(
    267         arrays=arrays,
    268         inds=inds,
   (...)
    273         **contract_opts,
    274     )
    276 # perform the contraction!
--> 277 data_out = array_contract(
    278     arrays,
    279     inds,
    280     inds_out,
    281     optimize=optimize,
    282     backend=backend,
    283     **contract_opts,
    284 )
    286 if not inds_out and not preserve_tensor:
    287     return maybe_realify_scalar(data_out)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/contraction.py:286, in array_contract(arrays, inputs, output, optimize, backend, **kwargs)
    284 if backend is None:
    285     backend = get_contract_backend()
--> 286 return ctg.array_contract(
    287     arrays, inputs, output, optimize=optimize, backend=backend, **kwargs
    288 )

File ~/Prog/pl/lib/python3.12/site-packages/cotengra/interface.py:794, in array_contract(arrays, inputs, output, optimize, cache_expression, backend, **kwargs)
    785 shapes = tuple(map(ar.shape, arrays))
    786 expr = array_contract_expression(
    787     inputs,
    788     output,
   (...)
    792     **kwargs,
    793 )
--> 794 return expr(*arrays, backend=backend)

File ~/Prog/pl/lib/python3.12/site-packages/cotengra/contract.py:760, in Contractor.__call__(self, *arrays, **kwargs)
    757 for p, l, r, tdot, arg, perm in contractions:
    758     if (l is None) and (r is None):
    759         # single term simplification, perform inplace with einsum
--> 760         temps[p] = _einsum(arg, temps[p])
    761         continue
    763     # get input arrays for this contraction

File ~/Prog/pl/lib/python3.12/site-packages/cotengra/contract.py:423, in einsum(eq, a, b, backend)
    402 """Perform arbitrary single and pairwise einsums using only `matmul`,
    403 `transpose`, `reshape` and `sum`.  The logic for each is cached based on
    404 the equation and array shape, and each step is only performed if necessary.
   (...)
    420 array_like
    421 """
    422 if b is None:
--> 423     return _einsum_single(eq, a, backend=backend)
    425 (
    426     eq_a,
    427     eq_b,
   (...)
    432     pure_multiplication,
    433 ) = _parse_eq_to_batch_matmul(eq, shape(a), shape(b))
    435 return _do_contraction_via_bmm(
    436     a,
    437     b,
   (...)
    445     backend,
    446 )

File ~/Prog/pl/lib/python3.12/site-packages/cotengra/contract.py:334, in _einsum_single(eq, x, backend)
    328 """Einsum on a single tensor, via three steps: diagonal selection
    329 (via advanced indexing), axes summations, transposition. The logic for each
    330 is cached based on the equation and array shape, and each step is only
    331 performed if necessary.
    332 """
    333 try:
--> 334     return do("einsum", eq, x, like=backend)
    335 except ImportError:
    336     pass

File ~/Prog/pl/lib/python3.12/site-packages/autoray/autoray.py:81, in do(fn, like, *args, **kwargs)
     79 backend = _choose_backend(fn, args, kwargs, like=like)
     80 func = get_lib_fn(backend, fn)
---> 81 return func(*args, **kwargs)

File ~/Prog/pl/lib/python3.12/site-packages/numpy/core/einsumfunc.py:1371, in einsum(out, optimize, *operands, **kwargs)
   1369     if specified_out:
   1370         kwargs['out'] = out
-> 1371     return c_einsum(*operands, **kwargs)
   1373 # Check the kwargs to avoid a more cryptic error later, without having to
   1374 # repeat default values here
   1375 valid_einsum_kwargs = ['dtype', 'order', 'casting']

ValueError: dimensions in operand 0 for collapsing index 'c' don't match (1 != 2)



#################################################################################

---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
Cell In[6], line 6
      3     qml.GlobalPhase(x)
      4     return qml.expval(qml.Z(0))
----> 6 circuit(0.5)

File ~/Prog/pennylane/pennylane/workflow/qnode.py:987, in QNode.__call__(self, *args, **kwargs)
    985 if qml.capture.enabled():
    986     return qml.capture.qnode_call(self, *args, **kwargs)
--> 987 return self._impl_call(*args, **kwargs)

File ~/Prog/pennylane/pennylane/workflow/qnode.py:977, in QNode._impl_call(self, *args, **kwargs)
    974     self._interface = interface
    976 try:
--> 977     res = self._execution_component(args, kwargs)
    978 finally:
    979     if old_interface == "auto":

File ~/Prog/pennylane/pennylane/workflow/qnode.py:935, in QNode._execution_component(self, args, kwargs)
    932 interface = None if self.interface == "numpy" else self.interface
    934 # pylint: disable=unexpected-keyword-arg
--> 935 res = qml.execute(
    936     (self._tape,),
    937     device=self.device,
    938     gradient_fn=gradient_fn,
    939     interface=interface,
    940     transform_program=full_transform_program,
    941     inner_transform=inner_transform_program,
    942     config=config,
    943     gradient_kwargs=gradient_kwargs,
    944     **execute_kwargs,
    945 )
    946 res = res[0]
    948 # convert result to the interface in case the qfunc has no parameters

File ~/Prog/pennylane/pennylane/workflow/execution.py:528, in execute(tapes, device, gradient_fn, interface, transform_program, inner_transform, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, device_vjp, mcm_config)
    526 # Exiting early if we do not need to deal with an interface boundary
    527 if no_interface_boundary_required:
--> 528     results = inner_execute(tapes)
    529     return post_processing(results)
    531 if (
    532     device_vjp
    533     and getattr(device, "short_name", "") in ("lightning.gpu", "lightning.kokkos")
    534     and interface in jpc_interfaces
    535 ):  # pragma: no cover

File ~/Prog/pennylane/pennylane/workflow/execution.py:207, in _make_inner_execute.<locals>.inner_execute(tapes, **_)
    204 transformed_tapes, transform_post_processing = transform_program(tapes)
    206 if transformed_tapes:
--> 207     results = device.execute(transformed_tapes, execution_config=execution_config)
    208 else:
    209     results = ()

File ~/Prog/pennylane/pennylane/devices/modifiers/simulator_tracking.py:30, in _track_execute.<locals>.execute(self, circuits, execution_config)
     28 @wraps(untracked_execute)
     29 def execute(self, circuits, execution_config=DefaultExecutionConfig):
---> 30     results = untracked_execute(self, circuits, execution_config)
     31     if isinstance(circuits, QuantumScript):
     32         batch = (circuits,)

File ~/Prog/pennylane/pennylane/devices/modifiers/single_tape_support.py:32, in _make_execute.<locals>.execute(self, circuits, execution_config)
     30     is_single_circuit = True
     31     circuits = (circuits,)
---> 32 results = batch_execute(self, circuits, execution_config)
     33 return results[0] if is_single_circuit else results

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:649, in DefaultTensor.execute(self, circuits, execution_config)
    643         raise WireError(
    644             "Mismatch between circuit and device wires. "
    645             f"Circuit has wires {circuit.wires.tolist()}. "
    646             f"Tensor on device has wires {self.wires.tolist()}"
    647         )
    648     circuit = circuit.map_to_standard_wires()
--> 649     results.append(self.simulate(circuit))
    651 return tuple(results)

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:690, in DefaultTensor.simulate(self, circuit)
    687     self._quimb_circuit = self._initial_quimb_circuit(wires)
    689 for op in operations:
--> 690     self._apply_operation(op)
    692 if not circuit.shots:
    693     if len(circuit.measurements) == 1:

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:707, in DefaultTensor._apply_operation(self, op)
    699 def _apply_operation(self, op: qml.operation.Operator) -> None:
    700     """Apply a single operator to the circuit.
    701 
    702     Internally it uses ``quimb``'s ``apply_gate`` method. This method modifies the tensor state of the device.
   (...)
    705         op (Operator): The operation to apply.
    706     """
--> 707     apply_operation_core(op, self)

File /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/functools.py:907, in singledispatch.<locals>.wrapper(*args, **kw)
    903 if not args:
    904     raise TypeError(f'{funcname} requires at least '
    905                     '1 positional argument')
--> 907 return dispatch(args[0].__class__)(*args, **kw)

File ~/Prog/pennylane/pennylane/devices/default_tensor.py:928, in apply_operation_core(ops, device)
    925 @singledispatch
    926 def apply_operation_core(ops: Operation, device):
    927     """Dispatcher for _apply_operation."""
--> 928     device._quimb_circuit.apply_gate(
    929         qml.matrix(ops).astype(device._c_dtype), *ops.wires, parametrize=None
    930     )

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/circuit.py:1896, in Circuit.apply_gate(self, gate_id, params, qubits, controls, gate_round, parametrize, *gate_args, **gate_opts)
   1854 """Apply a single gate to this tensor network quantum circuit. If
   1855 ``gate_round`` is supplied the tensor(s) added will be tagged with
   1856 ``'ROUND_{gate_round}'``. Alternatively, putting an integer first like
   (...)
   1885     default ``gate_opts``.
   1886 """
   1887 gate = parse_to_gate(
   1888     gate_id,
   1889     *gate_args,
   (...)
   1894     parametrize=parametrize,
   1895 )
-> 1896 self._apply_gate(gate, **gate_opts)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/circuit.py:1838, in Circuit._apply_gate(self, gate, tags, **gate_opts)
   1835         G = self._backend_gate_cache[key]
   1837     # apply the gate to the TN!
-> 1838     self._psi.gate_(G, gate.qubits, tags=tags, **opts)
   1840 # keep track of the gates applied
   1841 self._gates.append(gate)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:602, in TensorNetwork1DVector.gate(self, inplace, *args, **kwargs)
    600 @functools.wraps(gate_TN_1D)
    601 def gate(self, *args, inplace=False, **kwargs):
--> 602     return gate_TN_1D(self, *args, inplace=inplace, **kwargs)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:255, in gate_TN_1D(tn, G, where, contract, tags, propagate_tags, info, inplace, cur_orthog, **compress_opts)
    253         contract = True
    254     else:
--> 255         return tn.gate_nonlocal(
    256             G,
    257             where,
    258             cur_orthog=cur_orthog,
    259             info=info,
    260             inplace=inplace,
    261             **compress_opts,
    262         )
    264 # can use generic gate method
    265 return TensorNetworkGenVector.gate(
    266     tn,
    267     G,
   (...)
    274     **compress_opts,
    275 )

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:379, in convert_cur_orthog.<locals>.wrapped(self, cur_orthog, info, *args, **kwargs)
    376 @functools.wraps(fn)
    377 def wrapped(self, *args, cur_orthog=None, info=None, **kwargs):
    378     info = parse_cur_orthog(cur_orthog, info)
--> 379     return fn(self, *args, info=info, **kwargs)

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:2281, in MatrixProductState.gate_nonlocal(self, G, where, dims, method, info, inplace, **compress_opts)
   2278     dims = tuple(self.phys_dim(i) for i in where)
   2280 # create a sub-MPO and lazily combine it with the MPS
-> 2281 mpo = MatrixProductOperator.from_dense(
   2282     G, dims=dims, sites=where, L=self.L
   2283 )
   2285 return self.gate_with_submpo_(
   2286     mpo,
   2287     where=where,
   (...)
   2292     **compress_opts,
   2293 )

File ~/Prog/pl/lib/python3.12/site-packages/quimb/tensor/tensor_1d.py:3602, in MatrixProductOperator.from_dense(cls, A, dims, sites, L, tags, site_tag_id, upper_ind_id, lower_ind_id, **split_opts)
   3599     mpo |= tl
   3601 # add final right tensor
-> 3602 tm.add_tag(mpo.site_tag(sorted_sites[-1]))
   3603 mpo |= tm
   3605 # add global tags

IndexError: list index out of range

System information

PennyLane master branch (development version)

Existing GitHub issues

  • I have searched existing GitHub issues to make sure the issue does not already exist.
albi3ro added the bug 🐛 Something isn't working label on Oct 18, 2024