Battery Cell EIS Prediction from Pulse Electrical Signals with an LSTM Model (02)

Tags: Deep Learning, Cheminformatics, Lithium batteries
Author: 2636401124@qq.com
Published: 2023-11-08
Recommended image: Third-party software:ai4s-cup-0.1
Recommended machine type: c12_m46_1 * NVIDIA GPU B
Dataset: ai4spulseeis_v3(v1)
[ ]
import torch
import math
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
seq2seq LSTM

An encoder LSTM compresses each 99-step pulse sequence (voltage and current) into its final hidden states; a decoder LSTM, seeded with those states, autoregressively generates the 51-point EIS spectrum (real and imaginary parts).
[ ]
# define encoder
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.lstm = nn.LSTM(2, 256, batch_first=True, num_layers=2)  # input size 2: the pulse voltage and current

    def forward(self, x):
        _, (h_n, c_n) = self.lstm(x)
        return h_n, c_n  # the final h_n, c_n of both layers seed the decoder LSTM's hidden state
[ ]
class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.lstm = nn.LSTM(2, 256, batch_first=True, num_layers=2)
        self.dense = nn.Linear(256, 2)  # output size 2: the real and imaginary parts of the EIS

    def forward(self, x, hidden):
        # hidden is the (h_n, c_n) pair produced by the encoder;
        # x is the previous step's output, shaped (batch_size, 1, input_size)
        x, hidden = self.lstm(x, hidden)  # x: (batch, seq, D*H_out)
        x = self.dense(x)                 # (batch_size, 1, output_size)
        return x, hidden
[ ]
# define Encoder-Decoder model for EIS data
class EncoderDecoder(nn.Module):
    def __init__(self, encoder, decoder):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, encoder_input, decoder_output):
        seq_len_output = decoder_output.shape[1]
        hidden = self.encoder(encoder_input)        # (h_n, c_n) from the encoder's last step
        decoder_input = encoder_input[:, -1, :]     # last encoder time step becomes the first decoder input
        decoder_input = decoder_input.unsqueeze(1)  # restore the seq dimension
        for t in range(seq_len_output):
            output, hidden = self.decoder(decoder_input, hidden)
            decoder_output[:, t:t+1, :] = output
            decoder_input = output
        return decoder_output


# build model
encoder = Encoder()
decoder = Decoder()
model_lstm = EncoderDecoder(encoder, decoder)
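
A quick shape check (an illustrative sketch, not part of the original pipeline): one forward pass on random tensors confirms that a (batch, 99, 2) pulse input yields a (batch, 51, 2) EIS output.

[ ]
# sanity check: pulse input (batch, 99, 2) -> EIS output (batch, 51, 2)
dummy_pulse = torch.randn(4, 99, 2)  # 4 sequences, 99 steps, (voltage, current)
dummy_eis = torch.zeros(4, 51, 2)    # output buffer: 51 frequencies, (real, imaginary)
with torch.no_grad():
    out = model_lstm(dummy_pulse, dummy_eis)
print(out.shape)  # torch.Size([4, 51, 2])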

Data processing

Each battery contributes 49 SOC levels (0% to 96% in 2% steps): for every level, a 99-step pulse record (voltage, current) serves as encoder input and a 51-point EIS spectrum (real, imaginary) as the decoder target.
[ ]
# LSTM
def encoder_input(datasets, mode):  # pulse data
    encoder_input = []
    soc_lst = [f'{i*2}%SOC' for i in range(49)]
    for i in datasets:
        with open(f'ai4spulseeis_v3/{mode}_datasets/{mode}_pulse_{i}.pkl', 'rb') as fp:  # /bohr/ai4spulseeis-v3-749u/v1/
            pulse_data = pickle.load(fp, encoding='bytes')
        for soc in soc_lst:
            Vol = pulse_data[soc]['Voltage']
            Cur = pulse_data[soc]['Current']
            # stack 'Voltage' and 'Current' into two input features
            tensor_vol = torch.cat((torch.tensor(Vol).unsqueeze(-1), torch.tensor(Cur).unsqueeze(-1)), dim=-1).view(1, 99, 2)
            encoder_input.append(tensor_vol)

    return encoder_input

# prepare input data
train_baty = [1, 2, 5, 6]  # training set
validation_baty = [4]      # validation set
test_baty = [3]            # test set

encoder_input_train = encoder_input(datasets=train_baty, mode='train')     # 196 * [1, 99, 2]
encoder_input_val = encoder_input(datasets=validation_baty, mode='train')  # 49 * [1, 99, 2]
encoder_input_test = encoder_input(datasets=test_baty, mode='train')       # 49 * [1, 99, 2]
print(encoder_input_train[0].shape, len(encoder_input_train), len(encoder_input_val), len(encoder_input_test))
[ ]
def decoder_input_target(datasets, mode):  # EIS data
    soc_lst = [f'{i*2}%SOC' for i in range(49)]
    EIS_list = []
    for k in datasets:
        with open(f'ai4spulseeis_v3/{mode}_datasets/{mode}_eis_{k}.pkl', 'rb') as fp:  # /bohr/ai4spulseeis-v3-749u/v1/
            eis_data = pickle.load(fp, encoding='bytes')
        for soc in soc_lst:
            EIS_tot = [[], []]
            EIS_tot[0] = eis_data[soc]['Real']
            EIS_tot[1] = eis_data[soc]['Imaginary']
            EIS_list.append(EIS_tot)
    EIS_list = [np.array(t).squeeze().T for t in EIS_list]
    decoder_target = [torch.tensor(t).float().view(1, 51, 2) for t in EIS_list]
    decoder_input = [torch.ones(1, 51, 2) for t in EIS_list]  # all-ones tensors only fix the decoder output shape
    return decoder_input, decoder_target

decoder_input_train, decoder_target_train = decoder_input_target(datasets=train_baty, mode='train')
decoder_input_val, decoder_target_val = decoder_input_target(datasets=validation_baty, mode='train')
decoder_input_test, decoder_target_test = decoder_input_target(datasets=test_baty, mode='train')

print(decoder_input_train[0].shape, len(decoder_input_train), len(decoder_target_train))

GPU setup

If CUDA is available, move the model and all prepared tensors to the GPU.
[ ]
if torch.cuda.is_available():
    model = model_lstm.cuda()
    for i in range(len(encoder_input_train)):
        encoder_input_train[i] = encoder_input_train[i].cuda()
        decoder_input_train[i] = decoder_input_train[i].cuda()
        decoder_target_train[i] = decoder_target_train[i].cuda()
    for i in range(len(encoder_input_test)):
        encoder_input_test[i] = encoder_input_test[i].cuda()
        decoder_input_test[i] = decoder_input_test[i].cuda()
        decoder_target_test[i] = decoder_target_test[i].cuda()
    for i in range(len(encoder_input_val)):
        encoder_input_val[i] = encoder_input_val[i].cuda()
        decoder_input_val[i] = decoder_input_val[i].cuda()
        decoder_target_val[i] = decoder_target_val[i].cuda()
    print("CUDA = True")
else:
    model = model_lstm  # fall back to CPU so `model` is always defined
Optimizer

MSE loss, Adam (lr = 1e-4), and a ReduceLROnPlateau scheduler that scales the learning rate by 0.8 after 40 epochs without improvement, down to a floor of 1e-6.
[ ]
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.8, patience=40, verbose=True, min_lr=1e-6)
Prediction function

`evaluation` runs the model in eval mode over mini-batches without gradients and returns the stacked predictions together with the per-sample RMSE.
[ ]
# predict on held-out data
def evaluation(encoder_input, decoder_input, decoder_target, batch_size=2):
    model.eval()  # inference mode
    outputs_eval = []
    loss_tol = 0.0
    for batch_idx in range(0, len(encoder_input), batch_size):
        batch_encoder_input = torch.cat(encoder_input[batch_idx:batch_idx+batch_size], dim=0)
        batch_decoder_input = torch.cat(decoder_input[batch_idx:batch_idx+batch_size], dim=0)
        batch_decoder_target = torch.cat(decoder_target[batch_idx:batch_idx+batch_size], dim=0)

        with torch.no_grad():
            output = model(batch_encoder_input, batch_decoder_input)
            loss = criterion(output, batch_decoder_target)  # MSE between output and batch_decoder_target
        loss_tol += loss.item() * batch_encoder_input.shape[0]
        outputs_eval.append(output)

    mean_mse = loss_tol / len(encoder_input)  # loss_tol already sums per-sample losses, so divide by the sample count
    rmse = mean_mse ** 0.5
    return outputs_eval, rmse
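
Illustrative usage (a sketch; the exact number varies with random initialization): calling `evaluation` before training gives an untrained baseline RMSE to compare the trained model against.

[ ]
# untrained-model baseline on the validation set
_, rmse_val0 = evaluation(encoder_input=encoder_input_val,
                          decoder_input=decoder_input_val,
                          decoder_target=decoder_target_val, batch_size=4)
print(f'untrained validation RMSE: {rmse_val0:.4f}')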
Model training (num_epochs = 5000)
[ ]
num_epochs = 5000
batch_size = 4
train_losses = []  # per-epoch training loss
val_rmses = []     # validation RMSE, recorded every 50 epochs
for epoch in range(num_epochs):
    epoch_loss = 0.0
    train_size = len(encoder_input_train)
    for batch_idx in range(0, train_size, batch_size):
        # batch input
        batch_encoder_input = torch.cat(encoder_input_train[batch_idx:batch_idx+batch_size], dim=0)
        batch_decoder_input = torch.cat(decoder_input_train[batch_idx:batch_idx+batch_size], dim=0)
        batch_decoder_target = torch.cat(decoder_target_train[batch_idx:batch_idx+batch_size], dim=0)

        optimizer.zero_grad()
        model.train()  # switch back from eval mode to training mode
        outputs = model(batch_encoder_input, batch_decoder_input)
        loss = criterion(outputs, batch_decoder_target)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item() * batch_encoder_input.shape[0]

    mean_mse = epoch_loss / train_size  # per-sample mean MSE for this epoch
    scheduler.step(mean_mse)  # FIXME: should be called with the validation loss, not the training loss
    train_losses.append(mean_mse)
    if (epoch + 1) % 50 == 0:
        rmse = mean_mse ** 0.5
        _, rmse_val = evaluation(encoder_input=encoder_input_val,
                                 decoder_input=decoder_input_val,
                                 decoder_target=decoder_target_val, batch_size=4)
        val_rmses.append(rmse_val)
        print(f"Epoch [{epoch + 1}/{num_epochs}] | Loss: {mean_mse:.4f} | Train RMSE: {rmse:.4f} | Validation RMSE: {rmse_val:.4f}")
Loss analysis
[ ]
# plot the training-loss curve (log scale) and the validation RMSE
plt.figure(figsize=(12, 4.5))
losses_log = [math.log(t) for t in train_losses]
plt.subplot(1, 2, 1)
plt.plot(losses_log, label='Training loss')
plt.xlabel('Epochs')
plt.ylabel('Log MSE Loss')
plt.legend()

epochs_val = [(i+1)*50 for i in range(len(val_rmses))]  # RMSE was recorded every 50 epochs
plt.subplot(1, 2, 2)
plt.plot(epochs_val, val_rmses, color='orange', label='Validation RMSE')
plt.xlabel('Epochs')
plt.ylabel('RMSE')
plt.legend()
plt.show()
Test-set analysis

The trained model is evaluated on the held-out battery (test_baty = [3]) and the predicted spectra are compared against the measured ones.
[ ]
outputs_eval, _ = evaluation(encoder_input=encoder_input_test,
decoder_input=decoder_input_test,
decoder_target=decoder_target_test, batch_size=4)
print("Target:", len(decoder_target_test), decoder_target_test[0].shape)
print('outputs:', len(outputs_eval), outputs_eval[0].shape)
[ ]
# concatenate all batches into a 2 x (len(encoder_input_test)*51) array; row 0 is the real part, row 1 the imaginary part
predict_outputs = torch.cat(outputs_eval, dim=0).view(1, len(encoder_input_test)*51, 2).tolist()
predict_data = np.array(predict_outputs).squeeze().T
print(predict_data.shape)
[ ]
target_outputs = torch.cat(decoder_target_test, dim=0).view(1, len(encoder_input_test)*51, 2).tolist()
target_data = np.array(target_outputs).squeeze().T
print(target_data.shape)
[ ]
# compute per-battery MSE/RMSE over all 49 SOC curves (51 frequency points each)
n_per_baty = 49 * 51
distance = [[] for _ in range(len(test_baty))]

for j in range(len(test_baty)):
    for i in range(j * n_per_baty, (j + 1) * n_per_baty):
        diff0 = predict_data[0][i] - target_data[0][i]
        diff1 = predict_data[1][i] - target_data[1][i]
        distance[j].append(diff0 ** 2 + diff1 ** 2)

mse = []
rmse = []
for j in range(len(test_baty)):
    mse.append(np.mean(distance[j]))
    rmse.append(mse[j] ** 0.5)
    print(f'test data {j+1} -- RMSE: {rmse[j]:.4f}')
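
The same per-battery RMSE can be computed in vectorized form (an equivalent numpy sketch, reusing `n_per_baty` from above):

[ ]
# vectorized per-battery RMSE
sq_dist = ((predict_data - target_data) ** 2).sum(axis=0)  # |Z_pred - Z_target|^2 per frequency point
rmse_vec = [float(np.sqrt(sq_dist[j*n_per_baty:(j+1)*n_per_baty].mean())) for j in range(len(test_baty))]
print(rmse_vec)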
[ ]
# plot predictions for the first 10 SOC levels of each test battery (all 49 would clutter the figure)
for j in range(len(test_baty)):
    plt.figure(figsize=(9.5, 6.5))
    for i in range(10):
        base = 51 * (i + j * 49)  # each battery occupies 49 consecutive 51-point curves
        plt.plot(predict_data[0][base:base+51], predict_data[1][base:base+51], 'o-', label=f'predict soc={i*2}%')
        plt.plot(target_data[0][base:base+51], target_data[1][base:base+51], 'o-', markerfacecolor='white', label=f'target soc={i*2}%')

    plt.title(f'Comparison of EIS for test dataset {j+1}')
    plt.xlabel('Z_re')
    plt.ylabel('Z_im')
    plt.legend(loc=2, bbox_to_anchor=(1.05, 1.0), borderaxespad=0.)
    plt.show()
Writing submission.csv
[ ]
test_baty = [1, 2]
encoder_input_test = encoder_input(datasets=test_baty, mode='test')
decoder_input_test = [torch.ones(1, 51, 2) for t in encoder_input_test]
if torch.cuda.is_available():
    for i in range(len(encoder_input_test)):
        encoder_input_test[i] = encoder_input_test[i].cuda()
        decoder_input_test[i] = decoder_input_test[i].cuda()


# prediction function (the competition test set has no targets, so no loss is computed)
def evaluation_test(encoder_input, decoder_input, batch_size=4):
    model.eval()  # inference mode
    outputs_eval = []
    for batch_idx in range(0, len(encoder_input), batch_size):
        batch_encoder_input = torch.cat(encoder_input[batch_idx:batch_idx+batch_size], dim=0)
        batch_decoder_input = torch.cat(decoder_input[batch_idx:batch_idx+batch_size], dim=0)

        with torch.no_grad():
            output = model(batch_encoder_input, batch_decoder_input)
        outputs_eval.append(output)

    return outputs_eval

outputs_eval = evaluation_test(encoder_input_test, decoder_input_test, batch_size=4)
# concatenate all batches into a 2 x (len(encoder_input_test)*51) array; row 0 is the real part, row 1 the imaginary part
predict_outputs = torch.cat(outputs_eval, dim=0).view(1, len(encoder_input_test)*51, 2).tolist()
predict_data = np.array(predict_outputs).squeeze().T

results_data = {}
results_data['test_data_number'] = [1 for i in range(49*51)] + [2 for i in range(49*51)]
print(len(results_data['test_data_number']))

results_data['SOC(%)'] = []
for _ in range(len(test_baty)):  # one block of 49 SOC levels x 51 frequencies per test battery
    for i in range(49):
        results_data['SOC(%)'] += [i*2 for j in range(51)]

results_data['EIS_real'] = predict_data[0].tolist()
results_data['EIS_imaginary'] = predict_data[1].tolist()

data_submit = pd.DataFrame(results_data)
data_submit.to_csv('submission.csv', index=False, header=True)
print(data_submit)
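
A final sanity check (a small sketch added here, not in the original): the submission should contain one row per (battery, SOC, frequency) combination, i.e. 2 * 49 * 51 = 4998 rows.

[ ]
# verify the submission shape before uploading
assert len(data_submit) == 2 * 49 * 51  # 4998 rows
print(data_submit.head())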
