CNN-LSTM is a common deep learning architecture that combines a convolutional neural network (CNN) with a long short-term memory network (LSTM); it is used for tasks such as sequence classification and sequence generation. In PyTorch, a CNN-LSTM can be implemented with the following steps:
1. Define the CNN model: build a CNN using the convolution and pooling layers in `torch.nn`, for example:
```
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        return x
```
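As a quick sanity check of what this CNN produces (assuming 32×32 RGB inputs such as CIFAR-10, which is an assumption and not fixed by the code above), the two stride-2 poolings halve the spatial size twice:

```
import torch

cnn = CNN()
features = cnn(torch.randn(4, 3, 32, 32))  # hypothetical batch of 4 RGB images, 32x32
print(features.shape)                      # torch.Size([4, 64, 8, 8])
```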
2. Define the LSTM model: build an LSTM with the LSTM layer from `torch.nn`, for example:
```
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, dropout):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size  # exposed so downstream layers can read the output feature size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, dropout=dropout, batch_first=True)

    def forward(self, x):
        out, _ = self.lstm(x)
        return out
```
3. Define the CNN-LSTM model: connect the CNN and LSTM models, for example:
```
class CNN_LSTM(nn.Module):
    def __init__(self, cnn, lstm, num_classes):
        super(CNN_LSTM, self).__init__()
        self.cnn = cnn
        self.lstm = lstm
        self.fc = nn.Linear(lstm.hidden_size, num_classes)

    def forward(self, x):
        x = self.cnn(x)              # (batch_size, channels, height, width)
        # reshape tensor to (batch_size, sequence_length, input_size):
        # treat each spatial position as one time step with `channels` features
        x = x.flatten(2)             # (batch_size, channels, height * width)
        x = x.permute(0, 2, 1)       # (batch_size, height * width, channels)
        x = self.lstm(x)             # (batch_size, height * width, hidden_size)
        x = self.fc(x[:, -1, :])     # classify from the last time step
        return x
```
Here, `cnn` is the CNN model, `lstm` is the LSTM model, and `num_classes` is the number of classes. In `forward`, the input is first passed through the CNN to obtain a feature map, the feature map is then reshaped into the LSTM's expected input shape (each spatial position becomes one time step whose feature size equals the number of channels), and finally the LSTM output at the last time step is passed through a fully connected layer to produce the classification result.
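To make the reshape concrete, here is a minimal shape check. It again assumes 32×32 RGB inputs (an assumption, not part of the original), so the feature map is 64×8×8 and the LSTM sees a sequence of 64 time steps with 64 features each:

```
import torch

cnn = CNN()
lstm = LSTM(input_size=64, hidden_size=128, num_layers=1, dropout=0.0)  # 64 = cnn.conv2.out_channels
model = CNN_LSTM(cnn, lstm, num_classes=10)

dummy = torch.randn(4, 3, 32, 32)   # hypothetical batch of 4 RGB images, 32x32
logits = model(dummy)
print(logits.shape)                  # torch.Size([4, 10])
```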
4. Train the model: train the CNN-LSTM model using PyTorch's data loading utilities, an optimizer, and a loss function.
A complete code example:
```
import torch
import torch.nn as nn
# Define the CNN model
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        return x
# Define the LSTM model
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, dropout):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size  # exposed so the classifier can read the output feature size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, dropout=dropout, batch_first=True)

    def forward(self, x):
        out, _ = self.lstm(x)
        return out
# Define the CNN-LSTM model
class CNN_LSTM(nn.Module):
    def __init__(self, cnn, lstm, num_classes):
        super(CNN_LSTM, self).__init__()
        self.cnn = cnn
        self.lstm = lstm
        self.fc = nn.Linear(lstm.hidden_size, num_classes)

    def forward(self, x):
        x = self.cnn(x)              # (batch_size, channels, height, width)
        # reshape tensor to (batch_size, sequence_length, input_size)
        x = x.flatten(2)             # (batch_size, channels, height * width)
        x = x.permute(0, 2, 1)       # (batch_size, height * width, channels)
        x = self.lstm(x)
        x = self.fc(x[:, -1, :])     # classify from the last time step
        return x
# Training loop for one epoch
def train(model, train_loader, criterion, optimizer, device):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

# Evaluation on the test set
def test(model, test_loader, criterion, device):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    print('Accuracy: {:.2f}%'.format(accuracy))
# Load data (dataset/dataloader construction omitted here)
train_loader = ...
test_loader = ...

# Define model and optimizer
cnn = CNN()
# input_size matches the channel count of the last conv layer;
# note: nn.LSTM's dropout only applies between layers, so it has no effect when num_layers=1
lstm = LSTM(input_size=cnn.conv2.out_channels, hidden_size=128, num_layers=1, dropout=0.5)
model = CNN_LSTM(cnn, lstm, num_classes=10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()

# Train the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
num_epochs = 10
for epoch in range(num_epochs):
    train(model, train_loader, criterion, optimizer, device)
    test(model, test_loader, criterion, device)
```
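The `train_loader = ...` and `test_loader = ...` lines above are deliberately left as placeholders. Purely as an illustration (the dataset choice is an assumption, not part of the original example), one way to construct them with torchvision and CIFAR-10 would be:

```
# Illustrative only: one possible way to build the placeholder loaders, assuming CIFAR-10
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

transform = transforms.ToTensor()  # convert PIL images to (C, H, W) float tensors in [0, 1]
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = DataLoader(test_set, batch_size=64, shuffle=False)
```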
Note that this is only a basic implementation of a CNN-LSTM model; in practice it should be adjusted and tuned for the specific task.