VQE on 1D TFIM with Different Hamiltonian Representations#

Overview#

For ground-state preparation with VQE, we need to evaluate the expectation value of the Hamiltonian \(H\), i.e. \(\langle 0^N \vert U^{\dagger}(\theta) H U(\theta) \vert 0^N \rangle\), and update the parameters \(\theta\) of \(U(\theta)\) by gradient descent. In this tutorial, we show four methods that TensorCircuit supports for computing \(\langle H \rangle\):

1. \(\langle H \rangle = \sum_{i} \langle h_{i} \rangle\), where the \(h_{i}\) are Pauli-string operators;

2. \(\langle H \rangle\), where \(H\) is a sparse matrix;

3. \(\langle H \rangle\), where \(H\) is a dense matrix;

4. the expectation value of a matrix product operator (MPO).

Here we consider the transverse-field Ising model (TFIM), which reads

\[H = \sum_{i} \sigma_{i}^{x} \sigma_{i+1}^{x} - \sum_{i} \sigma_{i}^{z},\]

where \(\sigma_{i}^{x,z}\) are the Pauli matrices acting on the \(i\)-th qubit.
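
To make the operator being minimized concrete, here is a minimal numpy sketch (not part of the original tutorial; `kron_chain` and `tfim_dense` are hypothetical helpers) that assembles the dense TFIM Hamiltonian for a small chain from Kronecker products, following the open-boundary convention above:

import numpy as np
from functools import reduce

X = np.array([[0.0, 1.0], [1.0, 0.0]])
Z = np.array([[1.0, 0.0], [0.0, -1.0]])
I2 = np.eye(2)

def kron_chain(ops):
    # Kronecker product of a list of single-site operators
    return reduce(np.kron, ops)

def tfim_dense(n):
    h = np.zeros((2**n, 2**n))
    for i in range(n - 1):  # open boundary conditions
        ops = [I2] * n
        ops[i], ops[i + 1] = X, X
        h += kron_chain(ops)  # + X_i X_{i+1}
    for i in range(n):
        ops = [I2] * n
        ops[i] = Z
        h -= kron_chain(ops)  # - Z_i
    return h

print(tfim_dense(4).shape)  # (16, 16) for a 4-qubit chain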

Setup#

[1]:
import numpy as np
import tensorflow as tf
import tensorcircuit as tc
import tensornetwork as tn
from tensorcircuit.templates.measurements import operator_expectation
from tensorcircuit.quantum import quantum_constructor

tc.set_backend("tensorflow")
tc.set_dtype("complex128")
dtype = np.complex128

xx = tc.gates._xx_matrix  # the XX gate matrix to be used

Parameters#

[2]:
n = 4  # number of qubits
nlayers = 2  # number of circuit layers
ntrials = 2  # number of random circuit instances

Parameterized Quantum Circuit#

[3]:
def tfi_circuit(param, n, nlayers):
    c = tc.Circuit(n)
    for j in range(nlayers):
        for i in range(n - 1):
            c.exp1(i, i + 1, unitary=xx, theta=param[2 * j, i])  # parameterized XX-coupling layer
        for i in range(n):
            c.rz(i, theta=param[2 * j + 1, i])  # parameterized single-qubit Rz layer
    return c
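
As a quick sanity check (not in the original notebook), we can instantiate the ansatz with small random parameters and inspect the prepared state; the parameter tensor has shape `[2 * nlayers, n]`, matching the indexing above:

param0 = tf.random.normal(shape=[2 * nlayers, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr))
c0 = tfi_circuit(param0, n, nlayers)
print(c0.state().shape)  # (2**n,): the wavefunction prepared by the ansatz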

Pauli-String Operators#

Energy#

[4]:
def tfi_energy(c: tc.Circuit, j: float = 1.0, h: float = -1.0):
    e = 0.0
    n = c._nqubits
    for i in range(n):
        e += h * c.expectation((tc.gates.z(), [i]))  # <Z_i>
    for i in range(n - 1):  # OBC
        e += j * c.expectation(
            (tc.gates.x(), [i]), (tc.gates.x(), [(i + 1) % n])
        )  # <X_iX_{i+1}>
    return tc.backend.real(e)
[5]:
def vqe_tfim_paulistring(param, n, nlayers):
    c = tfi_circuit(param, n, nlayers)
    e = tfi_energy(c)
    return e
[6]:
vqe_tfim_paulistring_vvag = tc.backend.jit(
    tc.backend.vectorized_value_and_grad(vqe_tfim_paulistring)
)  # use vvag to get losses and gradients of different random circuit instances
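
`vectorized_value_and_grad` vectorizes over the leading (batch) axis of the first argument, so a single call evaluates all random circuit instances at once. A minimal shape check, assuming the batching convention just described:

params = tf.random.normal(shape=[ntrials, 2 * nlayers, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr))
es, grads = vqe_tfim_paulistring_vvag(params, n, nlayers)
print(es.shape, grads.shape)  # (ntrials,) energies and (ntrials, 2 * nlayers, n) gradients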

Main Optimization Loop#

[7]:
def batched_train_step_paulistring_tf(batch, n, nlayers, maxiter=10000):
    param = tf.Variable(
        initial_value=tf.random.normal(
            shape=[batch, nlayers * 2, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr)
        )
    )  # initial parameters
    opt = tf.keras.optimizers.Adam(1e-2)
    for i in range(maxiter):
        e, grad = vqe_tfim_paulistring_vvag(param.value(), n, nlayers)  # energies and gradients
        opt.apply_gradients([(grad, param)])
        if i % 200 == 0:
            print(e)
    return e


batched_train_step_paulistring_tf(ntrials, n, nlayers, 400)
2022-03-16 14:09:12.304188: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
tf.Tensor([-4.00557571 -3.97372412], shape=(2,), dtype=float64)
tf.Tensor([-4.68208061 -4.684804  ], shape=(2,), dtype=float64)
[7]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.75683202, -4.73689914])>

Sparse Matrix, Dense Matrix and MPO#

Hamiltonian#

[8]:
def tfi_hamiltonian():
    h = []
    w = []

    ### Z_i terms; Pauli-string encoding: 0 = I, 1 = X, 2 = Y, 3 = Z
    for i in range(n):
        h.append([])
        w.append(-1.0)  # weight
        for j in range(n):
            if j == i:
                h[i].append(3)
            else:
                h[i].append(0)

    ### X_i X_{i+1} terms (OBC)
    for i in range(n - 1):
        h.append([])
        w.append(1.0)  # weight
        for j in range(n):
            if j == (i + 1) % n or j == i:
                h[i + n].append(1)
            else:
                h[i + n].append(0)

    hamiltonian_sparse = tc.quantum.PauliStringSum2COO(
        tf.constant(h, dtype=tf.complex128), tf.constant(w, dtype=tf.complex128)
    )  # sparse matrix
    hamiltonian_dense = tc.quantum.PauliStringSum2Dense(
        tf.constant(h, dtype=tf.complex128), tf.constant(w, dtype=tf.complex128)
    )  # dense matrix
    return hamiltonian_sparse, hamiltonian_dense
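
Since the two constructions should encode the same operator, a hedged consistency check (not in the original; it assumes the TensorFlow backend returns a `tf.SparseTensor` for the sparse form) is to densify the sparse matrix and compare:

hs, hd = tfi_hamiltonian()
print(np.allclose(tf.sparse.to_dense(hs).numpy(), hd.numpy()))  # expect True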

Generating the QuOperator#

[9]:
def quoperator_mpo(tfi_mpo):
    tfi_mpo = tfi_mpo.tensors

    mpo = []
    for i in range(n):
        mpo.append(tn.Node(tfi_mpo[i]))

    for i in range(n - 1):
        tn.connect(mpo[i][1], mpo[i + 1][0])

    tfi_mpo = quantum_constructor(
        [mpo[i][-1] for i in range(n)],  # out_edges
        [mpo[i][-2] for i in range(n)],  # in_edges
        [],
        [mpo[0][0], mpo[-1][1]],  # ignore_edges
    )
    return tfi_mpo
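
A hedged sanity check (not in the original notebook): on the product state \(\vert 0^N \rangle\), \(\langle X_{i} X_{i+1} \rangle = 0\) and \(\langle Z_{i} \rangle = 1\), so \(\langle H \rangle\) should equal \(-n = -4\), provided the `FiniteTFI` MPO follows the same sign convention as the Pauli-string Hamiltonian above (the matching optimized energies below suggest it does):

mpo0 = tn.matrixproductstates.mpo.FiniteTFI(np.ones(n - 1), np.ones(n), dtype=dtype)
qop0 = quoperator_mpo(mpo0)
c0 = tc.Circuit(n)  # the all-zero product state
print(operator_expectation(c0, qop0))  # expect -4.0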

Energy#

[10]:
def vqe_tfim(param, n, nlayers, hamiltonian):
    c = tfi_circuit(param, n, nlayers)
    e = operator_expectation(
        c, hamiltonian
    )  # in operator_expectation, "hamiltonian" can be a sparse matrix, a dense matrix, or an MPO
    return e
[11]:
vqe_tfim_vvag = tc.backend.jit(
    tc.backend.vectorized_value_and_grad(vqe_tfim)
)  # use vvag to get losses and gradients of different random circuit instances

Main Optimization Loop#

[12]:
def batched_train_step_tf(batch, n, nlayers, hamiltonian, maxiter=10000):
    param = tf.Variable(
        initial_value=tf.random.normal(
            shape=[batch, nlayers * 2, n], stddev=0.1, dtype=getattr(tf, tc.rdtypestr)
        )
    )  # initial parameters

    opt = tf.keras.optimizers.Adam(1e-2)
    for i in range(maxiter):
        e, grad = vqe_tfim_vvag(param.value(), n, nlayers, hamiltonian)  # energies and gradients
        opt.apply_gradients([(grad, param)])
        if i % 200 == 0:
            print(e)
    return e

Sparse Matrix, Dense Matrix and MPO#

[13]:
(
    hamiltonian_sparse,
    hamiltonian_dense,
) = tfi_hamiltonian()  # Hamiltonian: sparse matrix, dense matrix

Jx = np.array([1.0 for _ in range(n - 1)])  # strength of the XX interaction (OBC)
Bz = np.array([1.0 for _ in range(n)])  # strength of the transverse field
hamiltonian_mpo = tn.matrixproductstates.mpo.FiniteTFI(Jx, Bz, dtype=dtype)  # matrix product operator
hamiltonian_mpo = quoperator_mpo(hamiltonian_mpo)  # generate the QuOperator from the MPO
2022-03-16 14:09:30.874680: I tensorflow/compiler/xla/service/service.cc:171] XLA service 0x7fd94503abc0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2022-03-16 14:09:30.874726: I tensorflow/compiler/xla/service/service.cc:179]   StreamExecutor device (0): Host, Default Version
2022-03-16 14:09:31.014341: I tensorflow/compiler/jit/xla_compilation_cache.cc:351] Compiled cluster using XLA!  This line is logged at most once for the lifetime of the process.
[14]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_sparse, 400)  # sparse matrix
WARNING:tensorflow:Using a while_loop for converting SparseTensorDenseMatMul
tf.Tensor([-4.04418884 -3.22012342], shape=(2,), dtype=float64)
tf.Tensor([-4.67668625 -4.66761143], shape=(2,), dtype=float64)
[14]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.74512239, -4.69965641])>
[15]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_dense, 400)  # dense matrix
tf.Tensor([-3.72705324 -3.99225849], shape=(2,), dtype=float64)
tf.Tensor([-4.70773521 -4.7330719 ], shape=(2,), dtype=float64)
[15]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.74236986, -4.7559722 ])>
[16]:
batched_train_step_tf(ntrials, n, nlayers, hamiltonian_mpo, 400)  # MPO
tf.Tensor([-3.9129593  -3.44283879], shape=(2,), dtype=float64)
tf.Tensor([-4.68271695 -4.67584305], shape=(2,), dtype=float64)
[16]:
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([-4.75283209, -4.75535872])>
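
As a final hedged check (not part of the original notebook), the converged energies of all three representations can be compared against exact diagonalization of the dense Hamiltonian built above; the smallest eigenvalue should lie at or slightly below the optimized values:

print(np.linalg.eigvalsh(hamiltonian_dense.numpy())[0])  # exact ground-state energy for n = 4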