Gradient Evaluation Efficiency Comparison#

Overview#

In this tutorial, we compare the efficiency of evaluating gradients and gradient-like objects (such as the quantum Fisher information) via the automatic differentiation framework provided by TensorCircuit and the traditional parameter-shift framework provided by Qiskit.
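
To make the comparison concrete, here is a minimal sketch (plain NumPy, not part of the benchmarks below) of the parameter-shift rule for the single-qubit circuit \(RX(\theta)|0\rangle\), whose \(\langle Z\rangle\) expectation is \(\cos\theta\). Each gradient entry costs two expectation evaluations, which is the source of the cost gap compared with reverse-mode automatic differentiation.

import numpy as np

def expectation(theta):
    # <Z> for RX(theta)|0>, evaluated analytically as a stand-in for a circuit run
    return np.cos(theta)

def parameter_shift_grad(f, theta, shift=np.pi / 2):
    # two expectation evaluations per parameter
    return (f(theta + shift) - f(theta - shift)) / 2

theta = 0.7
np.testing.assert_allclose(parameter_shift_grad(expectation, theta), -np.sin(theta))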

Setup#

We import the necessary packages and modules from Qiskit and TensorCircuit.

[1]:
import time
import numpy as np
from functools import reduce
from operator import xor

from qiskit.opflow import I, X, Z, StateFn, CircuitStateFn, SummedOp
from qiskit.circuit import QuantumCircuit, ParameterVector
from scipy.optimize import minimize
from qiskit.opflow.gradients import Gradient, QFI, Hessian

import tensorcircuit as tc
from tensorcircuit import experimental

Qiskit Gradient Framework Benchmark#

Since Qiskit is very slow at gradient evaluation, we benchmark small systems in Jupyter to save time; for larger and deeper circuits, the efficiency difference becomes even more pronounced.

The three gradient-like tasks are gradient, quantum Fisher information (QFI), and Hessian evaluation.
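
For reference, for a pure parameterized state \(|\psi(\theta)\rangle\), the quantum Fisher information matrix benchmarked here is

\[F_{ij} = 4\,\mathrm{Re}\left[\langle\partial_i\psi|\partial_j\psi\rangle - \langle\partial_i\psi|\psi\rangle\langle\psi|\partial_j\psi\rangle\right].\]

Qiskit's QFI class includes the conventional factor of 4, while the quantity returned by TensorCircuit's experimental.qng omits it, which is why the consistency check at the end of this tutorial multiplies the TensorCircuit result by 4.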

[2]:
def benchmark(f, *args, trials=10):
    # time the first call separately (staging time, including any compilation overhead)
    # and average the subsequent calls (running time)
    time0 = time.time()
    r = f(*args)
    time1 = time.time()
    for _ in range(trials):
        r = f(*args)
    time2 = time.time()
    if trials > 0:
        time21 = (time2 - time1) / trials
    else:
        time21 = 0
    ts = (time1 - time0, time21)
    print("staging time: %.6f s" % ts[0])
    if trials > 0:
        print("running time: %.6f s" % ts[1])
    return r, ts


def grad_qiskit(n, l, trials=2):
    hamiltonian = reduce(xor, [X for _ in range(n)])
    wavefunction = QuantumCircuit(n)
    params = ParameterVector("theta", length=3 * n * l)
    for j in range(l):
        for i in range(n - 1):
            wavefunction.cnot(i, i + 1)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i], i)
        for i in range(n):
            wavefunction.rz(params[3 * n * j + i + n], i)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i + 2 * n], i)

    # Define the expectation value corresponding to the energy
    op = ~StateFn(hamiltonian) @ StateFn(wavefunction)
    grad = Gradient().convert(operator=op, params=params)

    def get_grad_qiskit(values):
        value_dict = {params: values}
        grad_result = grad.assign_parameters(value_dict).eval()
        return grad_result

    return benchmark(get_grad_qiskit, np.ones([3 * n * l]), trials=trials)


def qfi_qiskit(n, l, trials=0):
    hamiltonian = reduce(xor, [X for _ in range(n)])
    wavefunction = QuantumCircuit(n)
    params = ParameterVector("theta", length=3 * n * l)
    for j in range(l):
        for i in range(n - 1):
            wavefunction.cnot(i, i + 1)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i], i)
        for i in range(n):
            wavefunction.rz(params[3 * n * j + i + n], i)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i + 2 * n], i)

    nat_grad = QFI().convert(operator=StateFn(wavefunction), params=params)

    def get_qfi_qiskit(values):
        value_dict = {params: values}
        grad_result = nat_grad.assign_parameters(value_dict).eval()
        return grad_result

    return benchmark(get_qfi_qiskit, np.ones([3 * n * l]), trials=trials)


def hessian_qiskit(n, l, trials=0):
    hamiltonian = reduce(xor, [X for _ in range(n)])
    wavefunction = QuantumCircuit(n)
    params = ParameterVector("theta", length=3 * n * l)
    for j in range(l):
        for i in range(n - 1):
            wavefunction.cnot(i, i + 1)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i], i)
        for i in range(n):
            wavefunction.rz(params[3 * n * j + i + n], i)
        for i in range(n):
            wavefunction.rx(params[3 * n * j + i + 2 * n], i)

    # Define the expectation value corresponding to the energy
    op = ~StateFn(hamiltonian) @ StateFn(wavefunction)
    grad = Hessian().convert(operator=op, params=params)

    def get_hs_qiskit(values):
        value_dict = {params: values}
        grad_result = grad.assign_parameters(value_dict).eval()
        return grad_result

    return benchmark(get_hs_qiskit, np.ones([3 * n * l]), trials=trials)
[3]:
g0, _ = grad_qiskit(6, 3)  # gradient
staging time: 1.665786 s
running time: 1.474930 s
[4]:
qfi0, _ = qfi_qiskit(6, 3)  # QFI
staging time: 47.131374 s
[5]:
hs0, _ = hessian_qiskit(6, 3)  # Hessian
staging time: 80.495983 s

TensorCircuit Automatic Differentiation Benchmark#

We benchmark the same problems defined in the Qiskit part above, and we can see the speed boost! In fact, for a moderate 10-qubit, 4-block system, QFI evaluation is accelerated by more than \(10^6\) times! (Note that the staging time for jit can be amortized, so only the running time is relevant. In the Qiskit case there is no jit, and thus the running time is the same as the staging time.)
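
To make the jit amortization concrete, here is a tiny sketch in plain JAX (independent of the circuit benchmarks) reusing the benchmark helper defined above: the first call triggers compilation and dominates the staging time, while subsequent calls reuse the compiled program.

import jax
import jax.numpy as jnp

@jax.jit
def toy(x):
    # any jitted function: compiled on the first call, cached afterwards
    return jnp.sum(jnp.sin(x) ** 2)

benchmark(toy, jnp.ones([1000]))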

[6]:
def grad_tc(n, l, trials=10):
    def f(params):
        c = tc.Circuit(n)
        for j in range(l):
            for i in range(n - 1):
                c.cnot(i, i + 1)
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i])
            for i in range(n):
                c.rz(i, theta=params[3 * n * j + i + n])
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i + 2 * n])
        return tc.backend.real(c.expectation(*[[tc.gates.x(), [i]] for i in range(n)]))

    get_grad_tc = tc.backend.jit(tc.backend.grad(f))
    return benchmark(get_grad_tc, tc.backend.ones([3 * n * l], dtype="float32"), trials=trials)


def qfi_tc(n, l, trials=10):
    def s(params):
        c = tc.Circuit(n)
        for j in range(l):
            for i in range(n - 1):
                c.cnot(i, i + 1)
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i])
            for i in range(n):
                c.rz(i, theta=params[3 * n * j + i + n])
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i + 2 * n])
        return c.state()

    get_qfi_tc = tc.backend.jit(experimental.qng(s, mode="fwd"))
    return benchmark(get_qfi_tc, tc.backend.ones([3 * n * l], dtype="float32"), trials=trials)


def hessian_tc(n, l, trials=10):
    def f(params):
        c = tc.Circuit(n)
        for j in range(l):
            for i in range(n - 1):
                c.cnot(i, i + 1)
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i])
            for i in range(n):
                c.rz(i, theta=params[3 * n * j + i + n])
            for i in range(n):
                c.rx(i, theta=params[3 * n * j + i + 2 * n])
        return tc.backend.real(c.expectation(*[[tc.gates.x(), [i]] for i in range(n)]))

    get_hs_tc = tc.backend.jit(tc.backend.hessian(f))
    return benchmark(get_hs_tc, tc.backend.ones([3 * n * l], dtype="float32"), trials=trials)
[7]:
for k in ["tensorflow", "jax"]:
    with tc.runtime_backend(k):
        print("---------------")
        print("%s backend" % k)
        print("gradient")
        g1, _ = grad_tc(6, 3)
        print("quantum Fisher information")
        qfi1, _ = qfi_tc(6, 3)
        print("Hessian")
        hs1, _ = hessian_tc(6, 3)
---------------
tensorflow backend
gradient
staging time: 15.889095 s
running time: 0.001126 s
quantum Fisher information
WARNING:tensorflow:The dtype of the watched primal must be floating (e.g. tf.float32), got tf.complex64
staging time: 53.973453 s
running time: 0.002332 s
Hessian
staging time: 96.066412 s
running time: 0.004685 s
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
---------------
jax backend
gradient
staging time: 4.696845 s
running time: 0.000105 s
quantum Fisher information
staging time: 4.618631 s
running time: 0.000386 s
Hessian
staging time: 23.591966 s
running time: 0.001681 s

The results obtained from the two methods are consistent, as verified by the following checks:

  • Gradient

[8]:
np.testing.assert_allclose(g0, g1, atol=1e-4)
  • Quantum Fisher Information (QFI)

[9]:
# Qiskit's QFI convention carries an extra factor of 4 relative to experimental.qng
np.testing.assert_allclose(qfi0, 4.0 * qfi1, atol=1e-3)
  • Hessian

[10]:
np.testing.assert_allclose(hs0, hs1, atol=1e-4)