VQE on 1D TFIM with Different Hamiltonian Representations#

Overview#

To prepare the ground state of a Hamiltonian \(H\) with VQE, we need to calculate the expectation value of \(H\), i.e., \(\langle 0^N \vert U^{\dagger}(\theta) H U(\theta) \vert 0^N \rangle\), and update the parameters \(\theta\) in \(U(\theta)\) by gradient descent. In this tutorial, we will show four ways supported in TensorCircuit to calculate \(\langle H \rangle\):

1. \(\langle H \rangle = \sum_{i} \langle h_{i} \rangle\), where the \(h_{i}\) are Pauli-string operators;

2. \(\langle H \rangle\), where \(H\) is represented as a sparse matrix;

3. \(\langle H \rangle\), where \(H\) is represented as a dense matrix;

4. \(\langle H \rangle\), where \(H\) is represented as a matrix product operator (MPO).

We consider the transverse-field Ising model (TFIM) here, which reads

\[H = \sum_{i} \sigma_{i}^{x} \sigma_{i+1}^{x} - \sum_{i} \sigma_{i}^{z},\]

where \(\sigma_{i}^{x,z}\) are Pauli matrices on the \(i\)-th qubit; we use open boundary conditions throughout, so the \(XX\) sum runs over nearest-neighbor pairs \((i, i+1)\).

Setup#

[1]:
import numpy as np
import tensorflow as tf
import tensorcircuit as tc
import tensornetwork as tn
from tensorcircuit.templates.measurements import operator_expectation
from tensorcircuit.quantum import quantum_constructor

tc.set_backend("tensorflow")
tc.set_dtype("complex128")
dtype = np.complex128

xx = tc.gates._xx_matrix  # xx gate matrix to be utilized
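
As a quick orientation before the TFIM workflow, the snippet below (a minimal sketch of our own, using a toy one-qubit circuit) shows how a single Pauli-string expectation is evaluated through the circuit API:

ctoy = tc.Circuit(1)
ctoy.h(0)  # prepare |+>, the +1 eigenstate of X
print(ctoy.expectation((tc.gates.x(), [0])))  # expect 1.0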

Parameters#

[2]:
n = 4  # The number of qubits
nlayers = 2  # The number of circuit layers
ntrials = 2  # The number of random circuit instances

Parameterized Quantum Circuits#

[3]:
def tfi_circuit(param, n, nlayers):
    c = tc.Circuit(n)
    for j in range(nlayers):
        for i in range(n - 1):  # parameterized XX layer on neighboring qubits
            c.exp1(i, i + 1, unitary=xx, theta=param[2 * j, i])
        for i in range(n):  # parameterized single-qubit Z-rotation layer
            c.rz(i, theta=param[2 * j + 1, i])
    return c
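
As a quick sanity check (our own sketch, not part of the original flow), we can build one circuit instance with constant toy parameters and confirm that the output is a wavefunction over \(2^n\) amplitudes:

param0 = 0.1 * np.ones([2 * nlayers, n])  # constant toy parameters
c0 = tfi_circuit(param0, n, nlayers)
print(c0.state().shape)  # (2**n,), i.e. (16,) for n = 4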

Pauli-string Operators#

Energy#

[4]:
def tfi_energy(c: tc.Circuit, j: float = 1.0, h: float = -1.0):
    e = 0.0
    n = c._nqubits
    for i in range(n):
        e += h * c.expectation((tc.gates.z(), [i]))  # <Z_i>
    for i in range(n - 1):  # OBC
        e += j * c.expectation(
            (tc.gates.x(), [i]), (tc.gates.x(), [(i + 1) % n])
        )  # <X_iX_{i+1}>
    return tc.backend.real(e)
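
To see the Pauli-string energy in isolation, one can evaluate it on a single unoptimized circuit (a sketch reusing the toy parameters above); the result should lie well above the ground-state energy:

c0 = tfi_circuit(0.1 * np.ones([2 * nlayers, n]), n, nlayers)
print(tfi_energy(c0))  # a single scalar energy for one circuit instance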
[5]:
def vqe_tfim_paulistring(param, n, nlayers):
    c = tfi_circuit(param, n, nlayers)
    e = tfi_energy(c)
    return e
[6]:
vqe_tfim_paulistring_vvag = tc.backend.jit(
    tc.backend.vectorized_value_and_grad(vqe_tfim_paulistring)
)  # use vvag to get losses and gradients of different random circuit instances
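
The batch of random circuit instances lives in the leading axis of the parameter tensor. As a sketch of the calling convention (shapes follow from the parameters above), vvag returns one energy per instance together with gradients of matching shape:

params = tf.random.normal([ntrials, nlayers * 2, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr))
es, grads = vqe_tfim_paulistring_vvag(params, n, nlayers)
print(es.shape, grads.shape)  # (ntrials,) and (ntrials, nlayers * 2, n)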

Main Optimization Loop#

[7]:
def batched_train_step_paulistring_tf(batch, n, nlayers, maxiter=10000):
    param = tf.Variable(
        initial_value=tf.random.normal(
            shape=[batch, nlayers * 2, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr)
        )
    )  # initial parameters
    opt = tf.keras.optimizers.Adam(1e-2)
    for i in range(maxiter):
        e, grad = vqe_tfim_paulistring_vvag(
            param.value(), n, nlayers
        )  # energy and gradients
        opt.apply_gradients([(grad, param)])
        if i % 200 == 0:
            print(e)
    return e


batched_train_step_paulistring_tf(ntrials, n, nlayers, 400)
2022-03-16 14:09:12.304188: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
tf.Tensor([-4.00557571 -3.97372412], shape=(2,), dtype=float64)
tf.Tensor([-4.68208061 -4.684804  ], shape=(2,), dtype=float64)
[7]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.75683202, -4.73689914])>

Sparse Matrix, Dense Matrix, and MPO#

Hamiltonian#

[8]:
def tfi_hamiltonian():
    # Pauli-string encoding per site: 0 -> I, 1 -> X, 2 -> Y, 3 -> Z
    h = []
    w = []

    ### Z
    for i in range(n):
        h.append([])
        w.append(-1.0)  # weight
        for j in range(n):
            if j == i:
                h[i].append(3)
            else:
                h[i].append(0)

    ### XX
    for i in range(n - 1):
        h.append([])
        w.append(1.0)  # weight
        for j in range(n):
            if j == (i + 1) % n or j == i:
                h[i + n].append(1)
            else:
                h[i + n].append(0)

    hamiltonian_sparse = tc.quantum.PauliStringSum2COO(
        tf.constant(h, dtype=tf.complex128), tf.constant(w, dtype=tf.complex128)
    )  # sparse matrix

    hamiltonian_dense = tc.quantum.PauliStringSum2Dense(
        tf.constant(h, dtype=tf.complex128), tf.constant(w, dtype=tf.complex128)
    )  # dense matrix
    return hamiltonian_sparse, hamiltonian_dense
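
As a consistency check (our own sketch), the sparse and dense constructions should represent exactly the same operator:

hs, hd = tfi_hamiltonian()
print(np.allclose(tf.sparse.to_dense(hs).numpy(), hd.numpy()))  # expect True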

Generate QuOperator#

[9]:
def quoperator_mpo(tfi_mpo):
    tfi_mpo = tfi_mpo.tensors  # the list of local MPO tensors

    mpo = []
    for i in range(n):
        mpo.append(tn.Node(tfi_mpo[i]))

    for i in range(n - 1):
        tn.connect(mpo[i][1], mpo[i + 1][0])  # connect neighboring virtual (bond) edges

    tfi_mpo = quantum_constructor(
        [mpo[i][-1] for i in range(n)],  # out_edges
        [mpo[i][-2] for i in range(n)],  # in_edges
        [],
        [mpo[0][0], mpo[-1][1]],  # ignore_edges: dangling boundary bonds of dimension 1
    )
    return tfi_mpo
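
For intuition about the wiring above (our own sketch): each FiniteTFI tensor carries two virtual bond edges on axes 0 and 1 and two physical edges on the last two axes, which is the convention the edge bookkeeping in quoperator_mpo relies on; the boundary bonds have dimension 1 and become the ignore_edges:

raw = tn.matrixproductstates.mpo.FiniteTFI(np.ones(n - 1), np.ones(n), dtype=dtype).tensors
print([t.shape for t in raw])  # boundary tensors carry a dangling bond of dimension 1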

Energy#

[10]:
def vqe_tfim(param, n, nlayers, hamiltonian):
    c = tfi_circuit(param, n, nlayers)
    e = operator_expectation(
        c, hamiltonian
    )  # in operator_expectation, the "hamiltonian" can be sparse matrix, dense matrix or mpo
    return e
[11]:
vqe_tfim_vvag = tc.backend.jit(
    tc.backend.vectorized_value_and_grad(vqe_tfim)
)  # use vvag to get losses and gradients of different random circuit instances

Main Optimization Loop#

[12]:
def batched_train_step_tf(batch, n, nlayers, hamiltonian, maxiter=10000):
    param = tf.Variable(
        initial_value=tf.random.normal(
            shape=[batch, nlayers * 2, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr)
        )
    )  # initial parameters

    opt = tf.keras.optimizers.Adam(1e-2)
    for i in range(maxiter):
        e, grad = vqe_tfim_vvag(
            param.value(), n, nlayers, hamiltonian
        )  # energy and gradients
        opt.apply_gradients([(grad, param)])
        if i % 200 == 0:
            print(e)
    return e

Training with the Sparse Matrix, Dense Matrix, and MPO#

[13]:
(
    hamiltonian_sparse,
    hamiltonian_dense,
) = tfi_hamiltonian()  # hamiltonian: sparse matrix, dense matrix

Jx = np.array([1.0 for _ in range(n - 1)])  # strength of xx interaction (OBC)
Bz = np.array([1.0 for _ in range(n)])  # strength of transverse field
hamiltonian_mpo = tn.matrixproductstates.mpo.FiniteTFI(
    Jx, Bz, dtype=dtype
)  # matrix product operator
hamiltonian_mpo = quoperator_mpo(hamiltonian_mpo)  # generate QuOperator from mpo
2022-03-16 14:09:30.874680: I tensorflow/compiler/xla/service/service.cc:171] XLA service 0x7fd94503abc0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2022-03-16 14:09:30.874726: I tensorflow/compiler/xla/service/service.cc:179]   StreamExecutor device (0): Host, Default Version
2022-03-16 14:09:31.014341: I tensorflow/compiler/jit/xla_compilation_cache.cc:351] Compiled cluster using XLA!  This line is logged at most once for the lifetime of the process.
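
Since the dense Hamiltonian is now available, we can diagonalize it exactly as a reference point for the VQE energies below (our own sketch):

print(np.linalg.eigvalsh(hamiltonian_dense.numpy())[0])  # exact ground-state energy for n = 4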
[14]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_sparse, 400)  # sparse matrix
WARNING:tensorflow:Using a while_loop for converting SparseTensorDenseMatMul
tf.Tensor([-4.04418884 -3.22012342], shape=(2,), dtype=float64)
tf.Tensor([-4.67668625 -4.66761143], shape=(2,), dtype=float64)
[14]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.74512239, -4.69965641])>
[15]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_dense, 400)  # dense matrix
tf.Tensor([-3.72705324 -3.99225849], shape=(2,), dtype=float64)
tf.Tensor([-4.70773521 -4.7330719 ], shape=(2,), dtype=float64)
[15]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.74236986, -4.7559722 ])>
[16]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_mpo, 400)  # mpo
tf.Tensor([-3.9129593  -3.44283879], shape=(2,), dtype=float64)
tf.Tensor([-4.68271695 -4.67584305], shape=(2,), dtype=float64)
[16]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.75283209, -4.75535872])>