Hi! Thanks for your response @CatalinaAlbornoz. I've already updated to version 0.32 and run my code. Here is my code again; I've tried to comment it more clearly. What I'm trying to do is something similar to this tutorial, Quanvolutional Neural Networks | PennyLane Demos, but in a trainable version and with the loops replaced by tensor operations. This code worked in PennyLane 0.30: I printed the image after the quantum convolution and it did exactly what I expected. It is also the same code I used to perform a classical convolution from scratch with PyTorch. Here is my code:
```python
class QuanvLayer1D(nn.Module):
    def __init__(self, sim_dev="lightning.kokkos", in_channels=1, out_channels=3,
                 kernel_size=2, stride=1, padding=1, n_layers=1, seed=0):
        super().__init__()
        # Initialize the device
        self.wires = out_channels  # We use n qubits to obtain n out_channels
        self.dev = qml.device(sim_dev, wires=self.wires)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_layers = n_layers

        if seed is None:
            seed = np.random.randint(low=0, high=10e6)
        print("Initializing Circuit with random seed", seed)

        @qml.qnode(device=self.dev, interface="torch", diff_method="adjoint")  # device = "default.qubit" or "lightning.qubit"
        def circuit(inputs, weights):
            # Encode one patch value per qubit as an RY rotation
            for j in range(self.out_channels):
                qml.RY(np.pi * inputs[j], wires=j)
            # Random quantum circuit
            RandomLayers(weights, wires=list(range(self.wires)), seed=seed)
            # Measurement producing out_channels classical output values
            return [qml.expval(qml.PauliZ(j)) for j in range(self.out_channels)]

        # weight_shapes = {"weights": [n_layers, out_channels]}  # n_rotations = out_channels
        weights = {"weights": (torch.randn((n_layers, out_channels)).to(device),)}
        self.circuit = qml.qnn.TorchLayer(circuit, weights)

    def forward(self, vector):
        batch_size, in_channels, height, width = vector.size()
        output_height = height - self.kernel_size + 1
        output_width = width - self.kernel_size + 1

        ###### 1 #########
        # The following line gives us patches of size kernel_size * kernel_size.
        # For example, for MNIST with a batch of 64 and a kernel of 2 we get [64, 4, 729].
        x_unfolded = F.unfold(vector, self.kernel_size)
        print('1. Shape after F.unfold(vector, kernel_size)', x_unfolded.shape)
        # device = next(self.circuit.parameters()).device
        # print('the device in the forward pass', device)
        x_unfolded = x_unfolded.to(device)

        ###### 2 #########
        # Now we separate out the input-channel dimension
        x_unfolded = x_unfolded.view(batch_size, in_channels, self.kernel_size * self.kernel_size, output_height * output_width)
        print('2. Shape after x.view(batch, in_channels, kernel * kernel, output_height * output_width)', x_unfolded.shape)

        ###### 3 #########
        # And we permute: we want output_height * output_width in dimension 1
        x_unfolded = x_unfolded.permute(0, 3, 1, 2)
        print('3. Shape after x.permute', x_unfolded.shape)

        ###### 4 #########
        # The last dimension now merges the previous dimensions 2 and 3.
        # Instead of the convolution operation, we perform a dot product between the kernel
        # and the corresponding pixels. It must have size in_channels * kernel * kernel because,
        # when we convolve an input with multiple channels, we also sum over the channels.
        x_unfolded = x_unfolded.view(batch_size, output_height * output_width, in_channels * self.kernel_size * self.kernel_size)
        print('4. Shape before entering the circuit', x_unfolded.shape)

        # with torch.autograd.profiler.profile(use_cuda=True) as prof:
        conv = self.circuit(inputs=torch.Tensor(x_unfolded))
        print('shape of conv', conv.shape)
        conv = conv.permute(0, 2, 1)
        conv = conv.contiguous()
        conv = conv.view(batch_size, self.out_channels, output_height, output_width)

        convp = conv.detach().cpu().numpy()
        plt.imshow(convp[0, 1, :, :], cmap="gray")
        plt.show()
        print('finished quanvolution')
        return conv
```
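For reference, this is how I verified the four reshaping steps: a minimal, self-contained sketch with random data where a classical kernel dot product stands in for the quantum circuit. The tensor values are arbitrary and only serve the shape/equivalence check; the result matches `F.conv2d`, which is what the pipeline is supposed to reproduce.

```python
import torch
import torch.nn.functional as F

# Same shapes as in my code above: batch of 2, single-channel 28x28, kernel 2
batch_size, in_channels, height, width = 2, 1, 28, 28
kernel_size, out_channels = 2, 3
output_height = height - kernel_size + 1  # 27
output_width = width - kernel_size + 1    # 27

x = torch.randn(batch_size, in_channels, height, width)
kernel = torch.randn(out_channels, in_channels, kernel_size, kernel_size)

# Steps 1-4, exactly as in forward()
x_unfolded = F.unfold(x, kernel_size)                                  # [2, 4, 729]
x_unfolded = x_unfolded.view(batch_size, in_channels,
                             kernel_size * kernel_size,
                             output_height * output_width)             # [2, 1, 4, 729]
x_unfolded = x_unfolded.permute(0, 3, 1, 2)                            # [2, 729, 1, 4]
x_unfolded = x_unfolded.view(batch_size, output_height * output_width,
                             in_channels * kernel_size * kernel_size)  # [2, 729, 4]

# Classical stand-in for the circuit: dot product with the flattened kernel
out = x_unfolded @ kernel.view(out_channels, -1).t()                   # [2, 729, 3]
out = out.permute(0, 2, 1).contiguous().view(batch_size, out_channels,
                                             output_height, output_width)

# Matches a regular convolution
print(torch.allclose(out, F.conv2d(x, kernel), atol=1e-5))             # True
```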
And here is the printed output showing the dimensions of the input tensor after each of the four transformations applied before it enters the quantum circuit:
- Shape after F.unfold(vector, kernel_size): torch.Size([2, 4, 729])
- Shape after x.view(batch, in_channels, kernel * kernel, output_height * output_width): torch.Size([2, 1, 4, 729])
- Shape after x.permute: torch.Size([2, 729, 1, 4])
- Shape before entering the circuit: torch.Size([2, 729, 4])
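(To spell out the arithmetic: the test input here is a batch of 2 single-channel 28x28 images, so with kernel_size = 2 there are output_height * output_width = 27 * 27 = 729 patches, each containing in_channels * kernel_size * kernel_size = 1 * 2 * 2 = 4 values. The circuit therefore receives a tensor with two batch-like dimensions, [2, 729], in front of the 4 input features.)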
And here is the error message:
```
STAGE:2023-09-25 17:33:23 7589:7589 ActivityProfilerController.cpp:311] Completed Stage: Warm Up
STAGE:2023-09-25 17:33:23 7589:7589 ActivityProfilerController.cpp:317] Completed Stage: Collection
STAGE:2023-09-25 17:33:23 7589:7589 ActivityProfilerController.cpp:321] Completed Stage: Post Processing
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_7589/1713626098.py in <module>
     10 # Compute the outputs and the loss
     11 with torch.autograd.profiler.profile(use_cuda=True) as prof:
---> 12     outputs = net(inputs)
     13 print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
     14 loss = criterion(outputs, labels)

~/.local/lib/python3.10/site-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_7589/1755277684.py in forward(self, x)
     11
     12 def forward(self, x):
---> 13     x = self.quanv_layer(x)
     14     x = self.conv_layer(x)
     15     x = self.relu(x)

~/.local/lib/python3.10/site-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_7589/593562690.py in forward(self, vector)
     73         # with torch.autograd.profiler.profile(use_cuda=True) as prof:
     74
---> 75         conv = self.circuit(inputs=torch.Tensor(x_unfolded))
     76         print('shape of conv', conv.shape)
     77         conv = conv.permute(0, 2, 1)

~/.local/lib/python3.10/site-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

~/.local/lib/python3.10/site-packages/pennylane/qnn/torch.py in forward(self, inputs)
    406         else:
    407             # calculate the forward pass as usual
--> 408             results = self._evaluate_qnode(inputs)
    409
    410         # reshape to the correct number of batch dims

~/.local/lib/python3.10/site-packages/pennylane/qnn/torch.py in _evaluate_qnode(self, x)
    433
    434         if len(x.shape) > 1:
--> 435             res = [torch.reshape(r, (x.shape[0], -1)) for r in res]
    436
    437         return torch.hstack(res).type(x.dtype)

~/.local/lib/python3.10/site-packages/pennylane/qnn/torch.py in <listcomp>(.0)
    433
    434         if len(x.shape) > 1:
--> 435             res = [torch.reshape(r, (x.shape[0], -1)) for r in res]
    436
    437         return torch.hstack(res).type(x.dtype)

RuntimeError: shape '[1458, -1]' is invalid for input of size 4
```
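In case it is useful, here is a sketch of the workaround I am considering (only an assumption on my side, not verified): since the reshape in torch.py line 435 uses '[1458, -1]' and 1458 = 2 * 729, it looks like TorchLayer may expect a single batch dimension, so I could flatten the two batch-like dimensions into one before calling the circuit and restore them afterwards, replacing the circuit call and the reshapes after it in forward() above:

```python
# Sketch only, assuming TorchLayer broadcasts over a single batch dimension:
# flatten [batch, patches, features] -> [batch * patches, features]
flat = x_unfolded.reshape(batch_size * output_height * output_width,
                          in_channels * self.kernel_size * self.kernel_size)
conv = self.circuit(inputs=flat)  # expected shape: [1458, out_channels]
# Restore the batch and patch dimensions, then rebuild the image layout
conv = conv.view(batch_size, output_height * output_width, self.out_channels)
conv = conv.permute(0, 2, 1).contiguous()
conv = conv.view(batch_size, self.out_channels, output_height, output_width)
```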