Commit

Merge branch 'csa' into variational_khk
dwierichs committed Nov 14, 2024
2 parents 9cd1578 + a3d9afd commit 36bf513
Showing 4 changed files with 106 additions and 60 deletions.
58 changes: 32 additions & 26 deletions pennylane/labs/dla/dense_util.py
@@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility tools for dense Lie algebra representations"""
# pylint: disable=too-many-return-statements, missing-function-docstring
from collections.abc import Iterable
from functools import reduce

# pylint: disable=too-many-return-statements, missing-function-docstring
from itertools import combinations
from typing import List, Optional, Union

@@ -314,25 +313,26 @@ def apply_basis_change(change_op, targets):
def orthonormalize(basis):
r"""Orthonormalize a list of basis vectors"""

if isinstance(basis, PauliVSpace) or all(
isinstance(op, (PauliSentence, Operator)) for op in basis
):
return _orthonormalize_ps(basis)

if all(isinstance(op, np.ndarray) for op in basis):
return _orthonormalize_np(basis)

if all(isinstance(op, (PauliSentence, PauliVSpace, Operator)) for op in basis):
return _orthonormalize_ps(basis)

raise NotImplementedError(f"orthonormalize not implemented for {basis} of type {type(basis)}")
raise NotImplementedError(
f"orthonormalize not implemented for basis of type {type(basis[0])}:\n{basis}"
)


def _orthonormalize_np(basis: Iterable[np.ndarray]):
gram_inv = np.linalg.pinv(
scipy.linalg.sqrtm(np.tensordot(basis, basis, axes=[[1, 2], [2, 1]]).real)
)
return np.tensordot(gram_inv, basis, axes=1) * np.sqrt(
len(basis[0])
) # TODO absolutely not sure about this
basis = np.array(basis)
gram_inv = np.linalg.pinv(scipy.linalg.sqrtm(trace_inner_product(basis, basis).real))
return np.tensordot(gram_inv, basis, axes=1)
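As a minimal standalone sketch of what the new `_orthonormalize_np` computes — a symmetric (Löwdin) orthonormalization that multiplies the basis by the pseudo-inverse square root of its Gram matrix of normalized trace inner products — the following assumes only NumPy and SciPy and uses a hypothetical two-element basis (it is not taken from the changed files):

import numpy as np
import scipy.linalg

X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
basis = np.array([X, X + 0.5 * Z])  # linearly independent, but not orthonormal

# Gram matrix of normalized trace inner products <A, B> = tr(A^dagger B) / dim
gram = np.einsum("aij,bij->ab", basis.conj(), basis).real / basis.shape[-1]
# Applying the pseudo-inverse of the matrix square root orthonormalizes the basis
gram_inv_sqrt = np.linalg.pinv(scipy.linalg.sqrtm(gram))
new_basis = np.tensordot(gram_inv_sqrt, basis, axes=1)

new_gram = np.einsum("aij,bij->ab", new_basis.conj(), new_basis).real / basis.shape[-1]
assert np.allclose(new_gram, np.eye(len(basis)))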


def _orthonormalize_ps(basis: Iterable[Union[PauliSentence, PauliVSpace, Operator]]):
def _orthonormalize_ps(basis: Union[PauliVSpace, Iterable[Union[PauliSentence, Operator]]]):
if isinstance(basis, PauliVSpace):
basis = basis.basis

@@ -381,29 +381,35 @@ def gram_schmidt(X):

def check_orthonormal(g, inner_product):
"""Utility function to check if operators in ``g`` are orthonormal with respect to the provided ``inner_product``"""
norm = np.zeros((len(g), len(g)), dtype=complex)
for i, gi in enumerate(g):
for j, gj in enumerate(g):
norm[i, j] = inner_product(gi, gj)

return np.allclose(norm, np.eye(len(norm)))
for op in g:
if not np.isclose(inner_product(op, op), 1.0):
return False
for opi, opj in combinations(g, r=2):
if not np.isclose(inner_product(opi, opj), 0.0):
return False
return True
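A minimal usage sketch of the rewritten `check_orthonormal`, which accepts any iterable of operators together with a callable inner product. The helper `normalized_trace_ip` below is hypothetical, introduced only for illustration; the import path follows the tests further down:

import numpy as np
from pennylane.labs.dla import check_orthonormal

X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

def normalized_trace_ip(A, B):
    # Normalized trace inner product <A, B> = tr(A^dagger B) / dim(A)
    return np.trace(A.conj().T @ B) / A.shape[-1]

assert check_orthonormal([X, Z], normalized_trace_ip)          # unit norms, orthogonal pair
assert not check_orthonormal([X, X + Z], normalized_trace_ip)  # X + Z is not normalized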


def trace_inner_product(
A: Union[PauliSentence, Operator, np.ndarray], B: Union[PauliSentence, Operator, np.ndarray]
):
r"""Trace inner product :math:`\langle A, B \rangle = \text{tr}\left(A^\dagger B\right)/\text{dim}(A)`"""
r"""Trace inner product :math:`\langle A, B \rangle = \text{tr}\left(A^\dagger B\right)/\text{dim}(A)`.
If the inputs are ``np.ndarray``, leading broadcasting axes are supported for either or both
inputs.
"""
if getattr(A, "pauli_rep", None) is not None and getattr(B, "pauli_rep", None) is not None:
return (A.pauli_rep @ B.pauli_rep).trace()

if not isinstance(A, type(B)):
raise TypeError("Both input operators need to be of the same type")

if isinstance(A, np.ndarray):
assert np.allclose(A.shape, B.shape)
return np.trace(A.conj().T @ B) / (len(A))
assert A.shape[-2:] == B.shape[-2:]
# The axes of the first input are switched, compared to tr[A@B], because we need to
# transpose A.
return np.tensordot(A.conj(), B, axes=[[-1, -2], [-1, -2]]) / A.shape[-1]

if isinstance(A, (PauliSentence, PauliWord)):
return (A @ B).trace()

if getattr(A, "pauli_rep", None) is not None and getattr(B, "pauli_rep", None) is not None:
return (A.pauli_rep @ B.pauli_rep).trace()

return NotImplemented
raise NotImplementedError
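A short broadcasting sketch for the updated ndarray branch of `trace_inner_product` (illustrative only; the Pauli matrices below are defined by hand):

import numpy as np
from pennylane.labs.dla import trace_inner_product

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
basis = np.array([X, Y, Z])

# (3, 2, 2) x (2, 2) -> (3,): inner product of each basis element with X
assert np.allclose(trace_inner_product(basis, X), [1.0, 0.0, 0.0])
# (3, 2, 2) x (3, 2, 2) -> (3, 3): the full Gram matrix in a single call
assert np.allclose(trace_inner_product(basis, basis), np.eye(3))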
8 changes: 6 additions & 2 deletions pennylane/labs/dla/structure_constants_dense.py
@@ -92,6 +92,7 @@ def structure_constants_dense(g: TensorLike, is_orthonormal: bool = True) -> Ten
g = np.array(g)

assert g.shape[2] == g.shape[1]
chi = g.shape[1]
# Assert Hermiticity of the input. Otherwise we'll get the sign wrong
assert np.allclose(g.conj().transpose((0, 2, 1)), g)

@@ -103,16 +104,19 @@

# project commutators on the basis of g, see docstring for details.
# Axis ordering is (dimg, _chi_, *chi*) x (dimg, *chi*, dimg, _chi_) -> (dimg, dimg, dimg)
adj = (1j * np.tensordot(g, all_coms, axes=[[1, 2], [3, 1]])).real
# Normalize trace inner product by dimension chi
adj = (1j * np.tensordot(g / chi, all_coms, axes=[[1, 2], [3, 1]])).real

if not is_orthonormal:
# If the basis is not orthonormal, compute the Gram matrix and apply its
# (pseudo-)inverse to the obtained projections. See the docstring for details.
# The Gram matrix is just one additional diagonal contraction of the ``prod`` tensor,
# across the Hilbert space dimensions. (dimg, _chi_, dimg, _chi_) -> (dimg, dimg)
# This contraction is missing the normalization factor 1/chi of the trace inner product.
gram_inv = np.linalg.pinv(np.sum(np.diagonal(prod, axis1=1, axis2=3), axis=-1).real)
# Axis ordering for contraction with gamma axis of raw structure constants:
# (dimg, _dimg_), (_dimg_, dimg, dimg) -> (dimg, dimg, dim)
adj = np.tensordot(gram_inv, adj, axes=1)
# Here we add the missing normalization factor of the trace inner product (after inversion)
adj = np.tensordot(gram_inv * chi, adj, axes=1)

return adj
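For an orthonormal basis the projection above amounts to f^gamma_{alpha,beta} = i tr(g_gamma [g_alpha, g_beta]) / chi; for a non-orthonormal basis the Gram matrix (rescaled by chi) is pseudo-inverted and applied along the gamma axis. A small consistency sketch on the Pauli basis of su(2), illustrative and not taken from the changed files:

import numpy as np
from pennylane.labs.dla import structure_constants_dense

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
g = np.array([X, Y, Z])  # orthonormal under tr(A^dagger B) / 2

adj = structure_constants_dense(g, is_orthonormal=True)

# Defining relation: [iG_alpha, iG_beta] = sum_gamma f^gamma_{alpha,beta} iG_gamma.
# For alpha=X, beta=Y this gives [iX, iY] = -2 iZ, i.e. f^Z_{X,Y} = -2.
assert np.isclose(adj[2, 0, 1], -2.0)
commutator = (1j * X) @ (1j * Y) - (1j * Y) @ (1j * X)
assert np.allclose(commutator, np.tensordot(adj[:, 0, 1], 1j * g, axes=1))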
54 changes: 49 additions & 5 deletions pennylane/labs/tests/dla/test_dense_utils.py
@@ -27,6 +27,7 @@
pauli_decompose,
trace_inner_product,
)
from pennylane.pauli import PauliVSpace

# Make an operator matrix on given wire and total wire count
I_ = lambda w, n: I(w).matrix(wire_order=range(n))
@@ -309,26 +310,69 @@ def test_op_to_adjvec_consistent_with_input_types():
assert np.allclose(res1, res2)


@pytest.mark.parametrize("op1", [X(0), X(0) @ X(1), X(0) @ Y(2), X(0) @ Z(1) + X(1) @ X(2)])
@pytest.mark.parametrize("op2", [X(0), X(0) @ X(1), X(0) @ Y(2), X(0) @ Z(1) + X(1) @ X(2)])
@pytest.mark.parametrize("op1", [X(0), -0.8 * X(0) @ X(1), X(0) @ Y(2), X(0) @ Z(1) + X(1) @ X(2)])
@pytest.mark.parametrize(
"op2", [X(0), X(0) + X(0) @ X(1), 0.2 * X(0) @ Y(2), X(0) @ Z(1) + X(1) @ X(2)]
)
def test_trace_inner_product_consistency(op1, op2):
"""Test that the trace inner product norm for different operators is consistent"""
res1 = trace_inner_product(
qml.matrix(op1, wire_order=range(3)), qml.matrix(op2, wire_order=range(3))
)
res2 = trace_inner_product(op1.pauli_rep, op2.pauli_rep)
res3 = trace_inner_product(op1, op2)
assert np.allclose(res1, res2)
assert np.allclose(res1, res3)


id_pw = qml.pauli.PauliWord({})


@pytest.mark.parametrize(
"g, inner_product",
[
(qml.ops.qubit.special_unitary.pauli_basis_matrices(3), trace_inner_product),
(qml.pauli.pauli_group(4), lambda A, B: (A @ B).pauli_rep.trace()),
(qml.pauli.pauli_group(4), lambda A, B: (A.pauli_rep @ B.pauli_rep).get(id_pw, 0.0)),
(list("abcdefghi"), lambda A, B: int(A == B)),
],
)
def test_check_orthonormal_True(g, inner_product):
"""Test check_orthonormal"""
assert check_orthonormal(g, inner_product)
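As a side note on the second and third parametrizations above: for Pauli arithmetic, the coefficient of the identity PauliWord in a product equals the normalized trace tr(AB)/2^n, which is what the `.get(id_pw, 0.0)` lambda extracts. A small sketch (illustrative only):

import numpy as np
import pennylane as qml

A = qml.X(0) @ qml.Z(1)
B = qml.X(0) @ qml.Z(1)

# Coefficient of the identity PauliWord in the product of the Pauli sentences ...
id_coeff = (A.pauli_rep @ B.pauli_rep).get(qml.pauli.PauliWord({}), 0.0)
# ... equals the normalized trace of the matrix product (2 qubits, dim = 2**2)
normalized_trace = np.trace(qml.matrix(A @ B, wire_order=range(2))) / 2**2
assert np.isclose(id_coeff, normalized_trace)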


# The reasons the following are not orthonormal are:
# Non-normalized ops
# Non-orthogonal ops
# Inner product is non-normalized trace inner product


@pytest.mark.parametrize(
"g, inner_product",
[
([np.eye(2), qml.X(0).matrix() + qml.Z(0).matrix()], trace_inner_product),
([qml.X(0).matrix(), qml.X(0).matrix() + qml.Z(0).matrix()], trace_inner_product),
(qml.pauli.pauli_group(2), lambda A, B: np.trace((A @ B).matrix())),
],
)
def test_check_orthonormal_False(g, inner_product):
"""Test check_orthonormal"""
assert not check_orthonormal(g, inner_product)


gens1 = [X(i) @ X(i + 1) + Y(i) @ Y(i + 1) + Z(i) @ Z(i + 1) for i in range(3)]
Heisenberg4_sum_op = qml.lie_closure(gens1)
Heisenberg4_sum_ps = [op.pauli_rep for op in Heisenberg4_sum_op]
Heisenberg4_sum_vspace = PauliVSpace(Heisenberg4_sum_ps)
Heisenberg4_sum_dense = [qml.matrix(op, wire_order=range(4)) for op in Heisenberg4_sum_op]


@pytest.mark.parametrize("g", [Heisenberg4_sum_ps, Heisenberg4_sum_dense])
def test_orthonormalize_ps(g):
"""Test orthonormalize on pauli sentences"""
@pytest.mark.parametrize(
"g", [Heisenberg4_sum_ps, Heisenberg4_sum_vspace, Heisenberg4_sum_op, Heisenberg4_sum_dense]
)
def test_orthonormalize(g):
"""Test orthonormalize"""

g = orthonormalize(g)

46 changes: 19 additions & 27 deletions pennylane/labs/tests/dla/test_structure_constants_dense.py
@@ -16,20 +16,24 @@

import numpy as np
import pytest
import scipy as sp

import pennylane as qml
from pennylane import X, Y, Z
from pennylane.labs.dla import (
check_orthonormal,
orthonormalize,
pauli_decompose,
structure_constants_dense,
trace_inner_product,
)
from pennylane.pauli import PauliSentence, PauliWord

## Construct some example DLAs
# TFIM
gens = [PauliSentence({PauliWord({i: "X", i + 1: "X"}): 1.0}) for i in range(1)]
gens += [PauliSentence({PauliWord({i: "Z"}): 1.0}) for i in range(2)]
Ising2 = qml.lie_closure(gens, pauli=True)

gens = [PauliSentence({PauliWord({i: "X", i + 1: "X"}): 1.0}) for i in range(2)]
gens += [PauliSentence({PauliWord({i: "Z"}): 1.0}) for i in range(3)]
Ising3 = qml.lie_closure(gens, pauli=True)
@@ -68,39 +72,31 @@ def test_structure_constants_dim(self):

def test_structure_constants_with_is_orthonormal(self):
"""Test that the structure constants with is_orthonormal=True/False match for
orthogonal inputs."""
orthonormal inputs."""

Ising3_dense = np.array([qml.matrix(op, wire_order=range(3)) for op in Ising3]) / np.sqrt(8)
Ising3_dense = np.array([qml.matrix(op, wire_order=range(3)) for op in Ising3])
assert check_orthonormal(Ising3_dense, trace_inner_product)
adjoint_true = structure_constants_dense(Ising3_dense, is_orthonormal=True)
adjoint_false = structure_constants_dense(Ising3_dense, is_orthonormal=False)
assert np.allclose(adjoint_true, adjoint_false)

@pytest.mark.parametrize(
"dla, use_orthonormal",
[
(Ising3, True),
(Ising3, False),
(XXZ3, True),
(XXZ3, False),
(Heisenberg3_sum, True),
(Heisenberg3_sum, False),
(sum_XXZ3, True),
(sum_XXZ3, False),
],
)
@pytest.mark.parametrize("dla", [Ising2, Ising3, XXZ3, Heisenberg3_sum, sum_XXZ3])
@pytest.mark.parametrize("use_orthonormal", [True, False])
def test_structure_constants_elements(self, dla, use_orthonormal):
r"""Test relation :math:`[i G_\alpha, i G_\beta] = \sum_{\gamma = 0}^{\mathfrak{d}-1} f^\gamma_{\alpha, \beta} iG_\gamma`."""
r"""Test relation :math:`[i G_α, i G_β] = \sum_{γ=0}^{d-1} f^γ_{α,β} iG_γ_`."""

d = len(dla)
dla_dense = np.array([qml.matrix(op, wire_order=range(3)) for op in dla])

if use_orthonormal:
dla = orthonormalize(dla)
assert check_orthonormal(dla, trace_inner_product)
dla_dense = orthonormalize(dla_dense)
assert check_orthonormal(dla_dense, trace_inner_product)
dla = pauli_decompose(dla_dense, pauli=True)
assert check_orthonormal(dla, trace_inner_product)

ad_rep_non_dense = qml.structure_constants(dla, is_orthogonal=False)
ad_rep = structure_constants_dense(dla_dense, is_orthonormal=use_orthonormal)
assert np.allclose(ad_rep, ad_rep_non_dense)
for i in range(d):
for j in range(d):

@@ -116,15 +112,11 @@ def test_structure_constants_elements(self, dla, use_orthonormal):
@pytest.mark.parametrize("use_orthonormal", [False, True])
def test_use_operators(self, dla, use_orthonormal):
"""Test that operators can be passed and lead to the same result"""
ops = np.array([qml.matrix(op.operation(), wire_order=range(3)) for op in dla])

if use_orthonormal:
gram_inv = sp.linalg.sqrtm(
np.linalg.pinv(np.tensordot(ops, ops, axes=[[1, 2], [2, 1]]).real)
)
ops = np.tensordot(gram_inv, ops, axes=1)
dla = [(scale * op).pauli_rep for scale, op in zip(np.diag(gram_inv), dla)]
dla = orthonormalize(dla)

ops = np.array([qml.matrix(op.operation(), wire_order=range(3)) for op in dla])

ad_rep_true = qml.pauli.dla.structure_constants(dla)
ad_rep_true = qml.structure_constants(dla)
ad_rep = structure_constants_dense(ops, is_orthonormal=use_orthonormal)
assert qml.math.allclose(ad_rep, ad_rep_true)
