如何使用自动微分?¶
变分电路的参数可以使用Tensorflow或Pytorch框架进行优化。 作为深度学习框架,Tensorflow支持自动微分。下面的脚本通过以保真度作为相应的损失函数,优化两个旋转的参数,使电路输出与目标状态匹配。
请注意,如下例所示,旋转角度必须取实数值,以确保旋转门表示幺正算子。 Qibo不提供Tensorflow和Pytorch作为原生后端;必须安装并使用Qiboml来提供这些量子机器学习后端。
In [ ]:
Copied!
from qibo import Circuit, gates, set_backend
from qibo.quantum_info import infidelity
import qibo
# 设置后端为 "qiboml" 和平台为 "tensorflow"
set_backend(backend="qiboml", platform="tensorflow")
# 获取当前后端并初始化 TensorFlow
backend = qibo.get_backend()
tf = backend.tf
# 优化参数
nepochs = 1000
optimizer = tf.keras.optimizers.Adam()
target_state = tf.ones(4, dtype=tf.complex128) / 2.0
# 定义电路的初始参数
params = tf.Variable(
tf.random.uniform((2,), dtype=tf.float64)
)
# 定义量子电路的结构
circuit = Circuit(2)
circuit.add(gates.RX(0, params[0]))
circuit.add(gates.RY(1, params[1]))
# 开始优化循环
for _ in range(nepochs):
with tf.GradientTape() as tape:
# 设置电路参数
circuit.set_parameters(params)
# 计算最终量子态
final_state = circuit().state()
# 计算损失函数(不保真度)
loss = infidelity(final_state, target_state, backend=backend)
# 计算梯度
grads = tape.gradient(loss, params)
# 应用梯度更新参数
optimizer.apply_gradients(zip([grads], [params]))
from qibo import Circuit, gates, set_backend
from qibo.quantum_info import infidelity
import qibo
# 设置后端为 "qiboml" 和平台为 "tensorflow"
set_backend(backend="qiboml", platform="tensorflow")
# 获取当前后端并初始化 TensorFlow
backend = qibo.get_backend()
tf = backend.tf
# 优化参数
nepochs = 1000
optimizer = tf.keras.optimizers.Adam()
target_state = tf.ones(4, dtype=tf.complex128) / 2.0
# 定义电路的初始参数
params = tf.Variable(
tf.random.uniform((2,), dtype=tf.float64)
)
# 定义量子电路的结构
circuit = Circuit(2)
circuit.add(gates.RX(0, params[0]))
circuit.add(gates.RY(1, params[1]))
# 开始优化循环
for _ in range(nepochs):
with tf.GradientTape() as tape:
# 设置电路参数
circuit.set_parameters(params)
# 计算最终量子态
final_state = circuit().state()
# 计算损失函数(不保真度)
loss = infidelity(final_state, target_state, backend=backend)
# 计算梯度
grads = tape.gradient(loss, params)
# 应用梯度更新参数
optimizer.apply_gradients(zip([grads], [params]))
[Qibo 0.2.21|INFO|2025-10-15 10:53:24]: Using qiboml (tensorflow) backend on /device:CPU:0
In [2]:
Copied!
print("Final parameters:", params)
print("Final parameters:", params)
Final parameters: <tf.Variable 'Variable:0' shape=(2,) dtype=float64, numpy=array([0.26564624, 1.40318672])>
In [3]:
Copied!
final_state
final_state
Out[3]:
<tf.Tensor: shape=(4,), dtype=complex128, numpy=
array([0.75721303+0.j , 0.6396014 +0.j ,
0. -0.10117105j, 0. -0.08545699j])>
In [4]:
Copied!
loss
loss
Out[4]:
<tf.Tensor: shape=(), dtype=float64, numpy=0.5035198562285943>
In [5]:
Copied!
nepochs = 1000
optimizer = tf.keras.optimizers.Adam()
target_state = tf.ones(4, dtype=tf.complex128) / 2.0
params = tf.Variable(tf.random.uniform((2,), dtype=tf.float64))
@tf.function
def optimize(params):
with tf.GradientTape() as tape:
circuit = Circuit(2)
circuit.add(gates.RX(0, theta=params[0]))
circuit.add(gates.RY(1, theta=params[1]))
final_state = circuit().state()
loss = infidelity(final_state, target_state, backend=backend)
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip([grads], [params]))
for _ in range(nepochs):
optimize(params)
nepochs = 1000
optimizer = tf.keras.optimizers.Adam()
target_state = tf.ones(4, dtype=tf.complex128) / 2.0
params = tf.Variable(tf.random.uniform((2,), dtype=tf.float64))
@tf.function
def optimize(params):
with tf.GradientTape() as tape:
circuit = Circuit(2)
circuit.add(gates.RX(0, theta=params[0]))
circuit.add(gates.RY(1, theta=params[1]))
final_state = circuit().state()
loss = infidelity(final_state, target_state, backend=backend)
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip([grads], [params]))
for _ in range(nepochs):
optimize(params)
In [6]:
Copied!
import torch
from qibo import Circuit, gates, set_backend
from qibo.quantum_info.metrics import infidelity
set_backend(backend="qiboml", platform="pytorch")
# Optimization parameters
nepochs = 1000
optimizer = torch.optim.Adam
target_state = torch.ones(4, dtype=torch.complex128) / 2.0
# Define circuit ansatz
params = torch.tensor(
torch.rand(2, dtype=torch.float64), requires_grad=True
)
circuit = Circuit(2)
circuit.add(gates.RX(0, params[0]))
circuit.add(gates.RY(1, params[1]))
optimizer = optimizer([params])
for _ in range(nepochs):
optimizer.zero_grad()
circuit.set_parameters(params)
final_state = circuit().state()
loss = infidelity(final_state, target_state)
loss.backward()
optimizer.step()
import torch
from qibo import Circuit, gates, set_backend
from qibo.quantum_info.metrics import infidelity
set_backend(backend="qiboml", platform="pytorch")
# Optimization parameters
nepochs = 1000
optimizer = torch.optim.Adam
target_state = torch.ones(4, dtype=torch.complex128) / 2.0
# Define circuit ansatz
params = torch.tensor(
torch.rand(2, dtype=torch.float64), requires_grad=True
)
circuit = Circuit(2)
circuit.add(gates.RX(0, params[0]))
circuit.add(gates.RY(1, params[1]))
optimizer = optimizer([params])
for _ in range(nepochs):
optimizer.zero_grad()
circuit.set_parameters(params)
final_state = circuit().state()
loss = infidelity(final_state, target_state)
loss.backward()
optimizer.step()
[Qibo 0.2.21|INFO|2025-10-15 11:03:56]: Using qiboml (pytorch) backend on cpu C:\Users\Administrator\AppData\Local\Temp\ipykernel_19616\2161827941.py:14: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor). params = torch.tensor(
In [7]:
Copied!
final_state
final_state
Out[7]:
tensor([0.6389+0.0000j, 0.6061+0.0000j, 0.0000-0.3437j, 0.0000-0.3261j],
dtype=torch.complex128, grad_fn=<ViewBackward0>)
In [8]:
Copied!
loss
loss
Out[8]:
tensor(0.5003, dtype=torch.float64, grad_fn=<RsubBackward1>)
In [ ]:
Copied!